diff --git a/servers/build.xml b/servers/build.xml new file mode 100644 index 0000000000000000000000000000000000000000..aa5a87717eaf7c654d92d8ba598f8459c1a725e5 --- /dev/null +++ b/servers/build.xml @@ -0,0 +1,69 @@ + + + + + + Builds, tests, and runs the project XtreemFS. + + + diff --git a/servers/config/dirconfig.properties b/servers/config/dirconfig.properties new file mode 100644 index 0000000000000000000000000000000000000000..5c608ae51476e481cd4857ed5bcffe28303ba172 --- /dev/null +++ b/servers/config/dirconfig.properties @@ -0,0 +1,32 @@ +# debug level (0 = error, 1 = warn, 2 = info, 3 = debug, 4 = trace) +debug_level = 1 + +# port for the service to listen on +listen.port = 32638 + +# optional address for network device ("any" if not specified) +# listen.address = 127.0.0.1 + +# directory containing the database +database.dir = /var/lib/xtreemfs/dir/database + +# specify whether SSL is required +ssl.enabled = false + +# server credentials for SSL handshakes +ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/ds.p12 +ssl.service_creds.pw = xtreemfs +ssl.service_creds.container = pkcs12 + +# trusted certificates for SSL handshakes +ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/xosrootca.jks +ssl.trusted_certs.pw = xtreemfs +ssl.trusted_certs.container = jks + +#authentication providers are used to retrieve the user identities +#from the client or from certificate +#default provider is org.xtreemfs.common.auth.NullAuthProvider +#which just takes the information provided by the client +authentication_provider = org.xtreemfs.common.auth.NullAuthProvider + +uuid = http://localhost:32638 \ No newline at end of file diff --git a/servers/config/mrcconfig.properties b/servers/config/mrcconfig.properties new file mode 100644 index 0000000000000000000000000000000000000000..8eb1c74b98f1dd49a2697fef9221cb0c6d71cbfd --- /dev/null +++ b/servers/config/mrcconfig.properties @@ -0,0 +1,83 @@ +# degub level (0 = error, 1 = warn, 2 = info, 3 = debug, 4 = trace) 
+debug_level = 1 + +# port for the service to listen on +listen.port = 32636 + +# optional address for network device, "any" if not specified +# listen.address = 127.0.0.1 + +# interval for querying the Directory Service for new OSDs +osd_check_interval = 10 + +# Directory Service endpoint +dir_service.host = localhost +dir_service.port = 32638 + +# directory for append log +database.log = /var/lib/xtreemfs/mrc/db-log + +# directory for volume databases +database.dir = /var/lib/xtreemfs/mrc/database + +# specify whether access time stamps are updated +no_atime = true + +# granularity of the local clock (in ms) +local_clock_renewal = 50 + +# interval between two remote clock syncs (in ms) +remote_time_sync = 60000 + +# specify whether SSL is required +ssl.enabled = false + +# server credentials for SSL handshakes +ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/mrc.p12 +ssl.service_creds.pw = xtreemfs +ssl.service_creds.container = pkcs12 + +# trusted certificates for SSL handshakes +ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/xosrootca.jks +ssl.trusted_certs.pw = xtreemfs +ssl.trusted_certs.container = jks + +# time span between two database checkpoint attempts (in ms) +database.checkpoint.interval = 1800000 + +# time span for which no requests must have been received to create a checkpoint (in ms) +database.checkpoint.idle_interval = 1000 + +# minimum size in bytes the log file must have to create a checkpoint +database.checkpoint.logfile_size = 16384 + +# Authentication providers are used to retrieve the user identities +# from the client or from certificate. +# The default provider is org.xtreemfs.mrc.auth.NullAuthProvider, which just +# takes the information provided by the client. The name of a pluggable +# provider can be used here. +authentication_provider = org.xtreemfs.common.auth.NullAuthProvider + +# Optional directory containing deployable MRC policy implementations. 
+# Policies can be directly deployed as .java or .class files in this directory +# or one of its subdirectories. They will be compiled at startup time and +# loaded at runtime. Policies may have external dependencies that can be +# deployed either as .java, .class or .jar files. While Java and Class files +# may be located in subdirectories, JAR files mustn't. So far, pluggable +# policies have to inherit from either org.xtreemfs.mrc.ac.FileAccessPolicy, +# org.xtreemfs.mrc.osdstatus.OSDSelectionPolicy, or +# org.xtreemfs.common.auth.AuthenticationProvider. Policies identified by +# policy IDs (OSDSelectionPolicy and FileAccessPolicy) require a public static +# long field called POLICY_ID that assigns the policy a unique number. +policy_dir = /etc/xos/xtreemfs/policies + +# Shared secret between the MRC and all OSDs. +# The secret is used by the MRC to sign capabilities, i.e. security tokens for +# data access at OSDs. In turn, an OSD uses the secret to verify that the +# capability has been issued by the MRC. The shared secret will be replaced by +# a public key infrastructure in future releases. 
+capability_secret = secretPassphrase + +# UUID for the OSD +# IMPORTANT: replace 'localhost' with a reachable host name/IP address +uuid = http://localhost:32636 \ No newline at end of file diff --git a/servers/config/osdconfig.properties b/servers/config/osdconfig.properties new file mode 100644 index 0000000000000000000000000000000000000000..34ca5567fc240ce70935103902b37d3df82c3df2 --- /dev/null +++ b/servers/config/osdconfig.properties @@ -0,0 +1,56 @@ +# degub level (0 = error, 1 = warn, 2 = info, 3 = debug, 4 = trace) +debug_level = 1 + +# port for the service to listen on +listen.port = 32640 + +# optional address for network device, "any" if not specified +# listen.address = 127.0.0.1 + +# Directory Service endpoint +dir_service.host = localhost +dir_service.port = 32638 + +# directory containing XtreemFS file content +object_dir = /var/lib/xtreemfs/objs/ + +# granularity of the local clock (in ms) +local_clock_renewal = 50 + +# interval between two remote clock syncs (in ms) +remote_time_sync = 60000 + +# specify whether SSL is required +ssl.enabled = false + +# server credentials for SSL handshakes +ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/osd.p12 +ssl.service_creds.pw = xtreemfs +ssl.service_creds.container = pkcs12 + +# trusted certificates for SSL handshakes +ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/xosrootca.jks +ssl.trusted_certs.pw = xtreemfs +ssl.trusted_certs.container = jks + +report_free_space = true + +# specify whether internal OSD checksums are required +# if the flag is set to true, the OSD will calculate checksums for +# newly created objects, which will be checked when the object is read +checksums.enabled = false + +# algorithm used for checksum calculation +# by default, Adler32, CRC32, MD5 and SHA-1 are supported +checksums.algorithm = Adler32 + +# Shared secret between the MRC and all OSDs. +# The secret is used by the MRC to sign capabilities, i.e. security tokens for +# data access at OSDs. 
In turn, an OSD uses the secret to verify that the +# capability has been issued by the MRC. The shared secret will be replaced by +# a public key infrastructure in future releases. +capability_secret = secretPassphrase + +# UUID for the OSD +# IMPORTANT: replace 'localhost' with a reachable host name/IP address +uuid = http://localhost:32640 \ No newline at end of file diff --git a/servers/lib/hsqldb.jar b/servers/lib/hsqldb.jar new file mode 100644 index 0000000000000000000000000000000000000000..dc3055e4463b389c9d6170b1f01950af9cd1cf3a Binary files /dev/null and b/servers/lib/hsqldb.jar differ diff --git a/servers/lib/test/commons-codec-1.3.jar b/servers/lib/test/commons-codec-1.3.jar new file mode 100644 index 0000000000000000000000000000000000000000..957b6752af9a60c1bb2a4f65db0e90e5ce00f521 Binary files /dev/null and b/servers/lib/test/commons-codec-1.3.jar differ diff --git a/servers/lib/test/commons-httpclient-3.0.1-contrib.jar b/servers/lib/test/commons-httpclient-3.0.1-contrib.jar new file mode 100644 index 0000000000000000000000000000000000000000..4fa5f5d2bfe8cef1e6d75347ae58e0de1d7fd8ab Binary files /dev/null and b/servers/lib/test/commons-httpclient-3.0.1-contrib.jar differ diff --git a/servers/lib/test/commons-httpclient-3.0.1.jar b/servers/lib/test/commons-httpclient-3.0.1.jar new file mode 100644 index 0000000000000000000000000000000000000000..cfc777c71d600a90001b7b2dcd68993d0977b0cb Binary files /dev/null and b/servers/lib/test/commons-httpclient-3.0.1.jar differ diff --git a/servers/lib/test/commons-logging-1.1.jar b/servers/lib/test/commons-logging-1.1.jar new file mode 100644 index 0000000000000000000000000000000000000000..2ff9bbd90d63f92cdffea944869ed9bea7ead49c Binary files /dev/null and b/servers/lib/test/commons-logging-1.1.jar differ diff --git a/servers/lib/test/junit-4.3.1.jar b/servers/lib/test/junit-4.3.1.jar new file mode 100644 index 0000000000000000000000000000000000000000..ff5d1888fc7a8b1501711594c70dc80fe07dce29 Binary files 
/dev/null and b/servers/lib/test/junit-4.3.1.jar differ diff --git a/servers/man/man1/xtfs_cleanup.1 b/servers/man/man1/xtfs_cleanup.1 new file mode 100644 index 0000000000000000000000000000000000000000..98e64ed200ea687936bebb91a4292e01567ef577 --- /dev/null +++ b/servers/man/man1/xtfs_cleanup.1 @@ -0,0 +1,48 @@ +.TH xtfs_cleanup 1 "September 2008" "The XtreemFS Distributed File System" "XtreemFS server" +.SH NAME +xtfs_cleanup \- checks for each file on the OSD, if it has an entry at the Metadata Server (MRC). +.SH SYNOPSIS +\fBxtfs_cleanup [ \fIoptions\fB ] \fI\fB | \fIuuid: +.BR + +.SH DESCRIPTION +.I xtfs_cleanup +performs a check of each file on the given Object Storage Device (OSD) whether it is registered at a MRC or not. +Returns a list of those files which properly can be deleted. + +.SH EXAMPLE USAGE +.B "xtfs_cleanup [options] uuid:" +.PP +Cleans up the OSD with the given uuid. + +.SH OPTIONS +.TP +.TP +\fB-h\fP show usage info +.TP +\fB-v\fP verbose mode - no listing of file details +.TP +\fB-h\fP restore mode - all files will be restored to directory '/Lost+Found/' at the volume they are located +.TP +\fB-e\fP !erase mode - all files will be removed without further request! +.TP +\fB-d\fP directory service (DIR) to use (e.g. 'http://localhost:32638') +If no DIR URI is specified, URI and security settings are taken from '/etc/xos/xtreemfs/default_dir'. + + + +.SH "SEE ALSO" +.BR xtfs_mount (1), +.BR xtfs_umount (1), +.BR xtfs_showmount (1), +.BR xtfs_mkvol (1), +.BR xtfs_rmvol (1), +.BR xtfs_lsvol (1), +.BR xtfs_stat (1), +.BR xtfs_sp (1), +.BR xtfs_mrcdbtool (1), +.BR xtfs_scrub (1) +.BR + +.SH AVAILABILITY +The xtfs_cleanup command is part of the XtreemFS-server package and is available from http://www.XtreemFS.org. 
diff --git a/servers/man/man1/xtfs_mrcdbtool.1 b/servers/man/man1/xtfs_mrcdbtool.1 new file mode 100644 index 0000000000000000000000000000000000000000..30ac2f9370adb9e1481a35db7abd50d2474492df --- /dev/null +++ b/servers/man/man1/xtfs_mrcdbtool.1 @@ -0,0 +1,55 @@ +.TH xtfs_mrcdbtool 1 "July 2008" "The XtreemFS Distributed File System" "XtreemFS server" +.SH NAME +xtfs_mrcdbtool \- dump and restore MRC databases. +.SH SYNOPSIS +\fBxtfs_mrcdbtool [\fIoptions\fB] \fIdump|restore dump_file +.br + +.SH DESCRIPTION +.I xtfs_mrcdbtool +dumps an MRC database to a file, or restores an MRC database from a dump file. An XML dump of the MRC database is created if the \fIdump\fP parameter is present. When dumping an MRC database, the XML file containing the dump will be created on the server at the path \fIdump_file\fP. Dumps can be restored by using the \fIrestore\fP parameter. For safety reasons, this is only possible if the target MRC does not have a database yet. + +.SH EXAMPLE USAGE +.B "xtfs_mrcdbtool -mrc http://localhost:32636 dump /tmp/dump.xml" +.PP +Dumps the database of the MRC running on \fIlocalhost:32636\fP to \fI/tmp/dump.xml\fP + +.B "xtfs_mrcdbtool -mrc http://localhost:32636 restore /tmp/dump.xml" +.PP +Restores the database of the MRC running on \fIlocalhost:32636\fP from the dump in \fI/tmp/dump.xml\fP + +.SH OPTIONS +.TP +.B \-h +Print help. +.TP +.B \-mrc \fImrc_url +The URL the MRC, e.g. http://localhost:32636. +.TP +.B \-c \fIcreds_file +Path to a PKCS#12 credentials file (private key + certificate) to use for SSL authentication. Must be present when MRC URL starts with https://. +.TP +.B \-cp \fIcreds_passphrase +An optional passphrase to access the credentials file. +.TP +.B \-t \fItrusted_CAs +Path to a PKCS#12 file containing a set of certificates from trusted certification authorities. These certificates will be used to authenticate the MRC. Must be present when MRC URL starts with https://. 
+.TP +.B \-tp \fItrusted_passphrase +An optional passphrase to access the truststore file. +.RE + +.SH "SEE ALSO" +.BR xtfs_mount (1), +.BR xtfs_umount (1), +.BR xtfs_showmount (1), +.BR xtfs_mkvol (1), +.BR xtfs_rmvol (1), +.BR xtfs_lsvol (1), +.BR xtfs_stat (1), +.BR xtfs_sp (1), +.BR xtfs_scrub (1) +.BR + +.SH AVAILABILITY +The xtfs_mrcdbtool command is part of the XtreemFS-server package and is available from http://www.XtreemFS.org \ No newline at end of file diff --git a/servers/man/man1/xtfs_scrub.1 b/servers/man/man1/xtfs_scrub.1 new file mode 100644 index 0000000000000000000000000000000000000000..9e9c41ac286cff09bb58e7a52470aa917836d213 --- /dev/null +++ b/servers/man/man1/xtfs_scrub.1 @@ -0,0 +1,54 @@ +.TH xtfs_scrub 1 "July 2008" "The XtreemFS Distributed File System" "XtreemFS server" +.SH NAME +xtfs_scrub \- checks for each file in a volume if the MRC file size is outdated and if the checksum is correct. +.SH SYNOPSIS +\fBxtfs_scrub [ \fIoptions\fB ] \fI\fB | \fIuuid: +.br + +.SH DESCRIPTION +.I xtfs_scrub +performs a consistency check of each file in a given volume. The check compares the file size stored as part of the metadata to the actual file size defined by all objects of the file. If the metadata file size is outdated, it is corrected, granted that no \fI\-chk\fP switch is provided. In addition, an error is indicated if the file's checksum is not correct. The volume to scrub can either be specified by a name or a UUID 'uuid:' + +.SH EXAMPLE USAGE +.B "xtfs_scrub -dir http://localhost:32638 myVolume +.PP +Scrubs all files in the volume named 'myVolume' registered at the Directory Service 'http://localhost:32638'. + +.SH OPTIONS +.TP +.TP +\fB-h\fP show usage info +.TP +\fB-dir\fP directory service to use (e.g. 'http://localhost:32638') +If no URI is specified, URI and security settings are taken from '/etc/xos/xtreemfs/default_dir' +In case of a secured URI ('https://...'), it is necessary to also specify SSL settings (-c, -cp, -t, -tp). 
+.TP +\fB-c\fP a PKCS#12 file containing user credentials +.TP +\fB-cp\fP a pass phrase to decrypt the the user credentials file +.TP +\fB-t\fP a PKCS#12 file containing a set of certificates from trusted CAs +.TP +\fB-tp\fP a pass phrase to decrypt the trusted CAs file +.TP +\fB\-chk\fP check only (do not update file sizes on the MRC in case of inconsistencies) +.TP +\fB\-cons\fP n number of connections per OSD (default=10) +.TP +\fB\-files\fP n number of files to fetch at once from MRC (default=100) + + +.SH "SEE ALSO" +.BR xtfs_mount (1), +.BR xtfs_umount (1), +.BR xtfs_showmount (1), +.BR xtfs_mkvol (1), +.BR xtfs_rmvol (1), +.BR xtfs_lsvol (1), +.BR xtfs_stat (1), +.BR xtfs_sp (1), +.BR xtfs_mrcdbtool (1) +.BR + +.SH AVAILABILITY +The xtfs_mrcdbtool command is part of the XtreemFS-server package and is available from http://www.XtreemFS.org diff --git a/servers/nbproject/build-impl.xml b/servers/nbproject/build-impl.xml new file mode 100644 index 0000000000000000000000000000000000000000..ca3d10a2280de081cca66f8118866555991f571e --- /dev/null +++ b/servers/nbproject/build-impl.xml @@ -0,0 +1,627 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Must set src.dir + Must set test.src.dir + Must set build.dir + Must set dist.dir + Must set build.classes.dir + Must set dist.javadoc.dir + Must set build.test.classes.dir + Must set build.test.results.dir + Must set build.classes.excludes + Must set dist.jar + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Must set javac.includes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + Must select some files in the IDE or set javac.includes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + To run this application from the command line without Ant, try: + + + + + + + java -cp "${run.classpath.with.dist.jar}" ${main.class} + + + + + + + + + + + + + + + + + + + + + + + To run this application from the command line without Ant, try: + + java -jar "${dist.jar.resolved}" + + + + + + + + + + + + + + + + + + + Must select one file in the IDE or set run.class + + + + + + + + + + + + + + + + + + + + Must select one file in the IDE or set debug.class + + + + + Must set fix.includes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Must select some files in the IDE or set javac.includes + + + + + + + + + + + + + + + + + + + + Some tests failed; see details above. + + + + + + + + + Must select some files in the IDE or set test.includes + + + + Some tests failed; see details above. + + + + + Must select one file in the IDE or set test.class + + + + + + + + + + + + + + + + + + + + + + + + + + + Must select one file in the IDE or set applet.url + + + + + + + + + Must select one file in the IDE or set applet.url + + + + + + + + + + + + + + + + + + + diff --git a/servers/nbproject/genfiles.properties b/servers/nbproject/genfiles.properties new file mode 100644 index 0000000000000000000000000000000000000000..7a45e2acae17a096edfdcacc55d970bb14ef1785 --- /dev/null +++ b/servers/nbproject/genfiles.properties @@ -0,0 +1,11 @@ +build.xml.data.CRC32=4a9eff70 +build.xml.script.CRC32=d0dcb2dc +build.xml.stylesheet.CRC32=240b97a2 +# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml. +# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you. 
+nbproject/build-impl.xml.data.CRC32=4a9eff70 +nbproject/build-impl.xml.script.CRC32=b1628b34 +nbproject/build-impl.xml.stylesheet.CRC32=f1d9da08 +nbproject/profiler-build-impl.xml.data.CRC32=4a9eff70 +nbproject/profiler-build-impl.xml.script.CRC32=abda56ed +nbproject/profiler-build-impl.xml.stylesheet.CRC32=a5b6598e diff --git a/servers/nbproject/project.properties b/servers/nbproject/project.properties new file mode 100644 index 0000000000000000000000000000000000000000..ee6aa55fe2b97c655d1078dfb781a69250c4395b --- /dev/null +++ b/servers/nbproject/project.properties @@ -0,0 +1,79 @@ +application.args= +application.title=XtreemFS +application.vendor=bjko +build.classes.dir=${build.dir}/classes +build.classes.excludes=**/*.java,**/*.form +# This directory is removed when the project is cleaned: +build.dir=build +build.generated.dir=${build.dir}/generated +# Only compile against the classpath explicitly listed here: +build.sysclasspath=ignore +build.test.classes.dir=${build.dir}/test/classes +build.test.results.dir=${build.dir}/test/results +debug.classpath=\ + ${run.classpath} +debug.test.classpath=\ + ${run.test.classpath} +# This directory is removed when the project is cleaned: +dist.dir=dist +dist.jar=${dist.dir}/XtreemFS.jar +dist.javadoc.dir=${dist.dir}/javadoc +excludes= +file.reference.bcprov-jdk16-139.jar=lib/bcprov-jdk16-139.jar +file.reference.cdaclient.jar=lib/cdaclient.jar +file.reference.commons-codec-1.3.jar=lib/test/commons-codec-1.3.jar +file.reference.commons-httpclient-3.0.1-contrib.jar=lib/test/commons-httpclient-3.0.1-contrib.jar +file.reference.commons-logging-1.1.jar=lib/test/commons-logging-1.1.jar +file.reference.hsqldb.jar=lib/hsqldb.jar +file.reference.junit-4.3.1.jar=lib/test/junit-4.3.1.jar +file.reference.commons-httpclient-3.0.1.jar=lib/test/commons-httpclient-3.0.1.jar +file.reference.config.jar=lib/config.jar +file.reference.je-3.2.13.jar=lib/je-3.2.13.jar +file.reference.xbean.jar=lib/xbean.jar +includes=** +jar.compress=false 
+javac.classpath=\ + ${file.reference.je-3.2.13.jar}:\ + ${file.reference.hsqldb.jar}:\ + ${file.reference.cdaclient.jar}:\ + ${file.reference.bcprov-jdk16-139.jar} +# Space-separated list of extra javac options +javac.compilerargs= +javac.deprecation=false +javac.source=1.5 +javac.target=1.6 +javac.test.classpath=\ + ${javac.classpath}:\ + ${build.classes.dir}:\ + ${file.reference.commons-httpclient-3.0.1.jar}:\ + ${file.reference.commons-codec-1.3.jar}:\ + ${file.reference.commons-logging-1.1.jar}:\ + ${libs.junit.classpath}:\ + ${file.reference.commons-httpclient-3.0.1-contrib.jar} +javadoc.additionalparam= +javadoc.author=false +javadoc.encoding= +javadoc.noindex=false +javadoc.nonavbar=false +javadoc.notree=false +javadoc.private=false +javadoc.splitindex=true +javadoc.use=true +javadoc.version=false +javadoc.windowtitle= +main.class= +manifest.file=manifest.mf +meta.inf.dir=${src.dir}/META-INF +platform.active=default_platform +run.classpath=\ + ${javac.classpath}:\ + ${build.classes.dir} +# Space-separated list of JVM arguments used when running the project +# (you may also define separate properties like run-sys-prop.name=value instead of -Dname=value +# or test-sys-prop.name=value to set system properties for unit tests): +run.jvmargs=-ea +run.test.classpath=\ + ${javac.test.classpath}:\ + ${build.test.classes.dir} +src.dir=src +test.src.dir=test diff --git a/servers/nbproject/project.xml b/servers/nbproject/project.xml new file mode 100644 index 0000000000000000000000000000000000000000..dae09abe30923274de28f73f459466d1d1041828 --- /dev/null +++ b/servers/nbproject/project.xml @@ -0,0 +1,16 @@ + + + org.netbeans.modules.java.j2seproject + + + XtreemFS + 1.6.5 + + + + + + + + + diff --git a/servers/src/org/xtreemfs/common/Capability.java b/servers/src/org/xtreemfs/common/Capability.java new file mode 100644 index 0000000000000000000000000000000000000000..0281279b13e83f971f6b1e94df4af22c29f5b181 --- /dev/null +++ 
b/servers/src/org/xtreemfs/common/Capability.java @@ -0,0 +1,272 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC) + */ + +package org.xtreemfs.common; + +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.List; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.OutputUtils; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; + +/** + * This class implements a Java representation of a capability. + * + * In general, a capability can be seen as a token granting the permission to + * carry out an operation on a remote server. + * + * When a client wants open a file, the MRC checks whether the respective kind + * of access is granted. 
If so, the MRC sends a capability to the client, which + * in turn sends the capability to the OSD when file contents are accessed or + * modified. The OSD has to check whether the capability is valid. A capability + * is valid as long as it has a correct signature and has not expired yet. + * Capabilities can be renewed in order to extend their validity. + * + * Each capability contains a file ID, a string representing the access mode, an + * expiration time stamp representing the time in seconds from 1/1/1970, a + * string containing data that can be used to verify the client identity, as + * well as a signature added by the MRC. + * + * + * @author stender + * + */ +public class Capability { + + /** + * default validity for capabilities in seconds + */ + public static final long DEFAULT_VALIDITY = 10 * 60; + + private final String fileId; + + private final String accessMode; + + private final long expires; + + private final String sharedSecret; + + private final String signature; + + private long epochNo; + + /** + * Creates a capability from a given set of data. The expiration time stamp + * will be generated automatically by means of the local system time, and a + * signature will be added. This constructor is meant to initially create a + * capability at the MRC. + * + * @param fileId + * the file ID + * @param accessMode + * the access mode + * @param epochNo + * the epoch number associated with the capability; epoch numbers + * are incremented each time the file is truncated or deleted + * @param sharedSecret + * the shared secret to be used to sign the capability + */ + public Capability(String fileId, String accessMode, long epochNo, String sharedSecret) { + + this.fileId = fileId; + this.accessMode = accessMode; + this.epochNo = epochNo; + this.sharedSecret = sharedSecret; + + this.expires = System.currentTimeMillis() / 1000 + DEFAULT_VALIDITY; + this.signature = calcSignature(); + } + + /** + * Creates a capability from a given set of data. 
A signature will be added + * automatically. This constructor is meant to initially create a capability + * at the MRC. + * + * @param fileId + * the file ID + * @param accessMode + * the access mode + * @param expires + * the expiration time stamp + * @param epochNo + * the epoch number associated with the capability; epoch numbers + * are incremented each time the file is truncated or deleted + * @param sharedSecret + * the shared secret to be used to sign the capability + */ + public Capability(String fileId, String accessMode, long expires, long epochNo, + String sharedSecret) { + this.fileId = fileId; + this.accessMode = accessMode; + this.expires = expires; + this.epochNo = epochNo; + this.sharedSecret = sharedSecret; + + this.signature = calcSignature(); + } + + /** + * Creates a capability from a string representation. This constructor is + * meant to be used to verify the validity of a capability string received + * from a remote host. + * + * @param capability + * the capability string + * @param sharedSecret + * the shared secret to be used to verify the capability + * @throws JSONException + * if parsing the capability failed + */ + public Capability(String capability, String sharedSecret) throws JSONException { + + List cap = (List) JSONParser.parseJSON(new JSONString(capability)); + assert (cap.size() == 6 || cap.size() == 5); + + this.sharedSecret = sharedSecret; + this.fileId = (String) cap.get(0); + this.accessMode = (String) cap.get(1); + this.expires = (Long) cap.get(2); + // ignore the client identity; it cannot be used because OSDs can act as + // client proxies + this.epochNo = (Long) cap.get(4); + this.signature = (String) cap.get(5); + } + + /** + * Creates a capability from a string representation. This constructor is + * meant to be used to parse and forward a received capability.
+ * It cannot be used to verify capabilities! For this purpose, please + * use Capability(String capability, String sharedSecret). + * + * @param capability + * the capability string + * @throws JSONException + * if parsing the capability failed + */ + public Capability(String capability) throws JSONException { + + List cap = (List) JSONParser.parseJSON(new JSONString(capability)); + assert (cap.size() == 6); + + this.sharedSecret = null; + this.fileId = (String) cap.get(0); + this.accessMode = (String) cap.get(1); + this.expires = (Long) cap.get(2); + // ignore the client identity; it cannot be used because OSDs can act as + // client proxies + this.epochNo = (Long) cap.get(4); + this.signature = (String) cap.get(5); + } + + public String getFileId() { + return fileId; + } + + public String getAccessMode() { + return accessMode; + } + + public long getExpires() { + return expires; + } + + public String getClientIdentity() { + return "*"; + } + + public long getEpochNo() { + return epochNo; + } + + public String getSignature() { + return signature; + } + + /** + * Checks whether the capability is valid. + * + * @return true, if it hasn't expired yet and the signature is + * valid, false, otherwise + */ + public boolean isValid() { + return !hasExpired() && hasValidSignature(); + } + + /** + * Checks whether the capability has expired. + * + * @return true, if the current system time is after the + * expiration time stamp false, otherwise + */ + public boolean hasExpired() { + return System.currentTimeMillis() / 1000 > expires; + } + + /** + * Checks whether the capability has a valid signature. + * + * @return true, if the signature is valid, false, + * otherwise + */ + public boolean hasValidSignature() { + return signature.equals(calcSignature()); + } + + /** + * Returns a string representation of the capability. + * + * @return a JSON-formatted string representing the capability. 
+ */ + public String toString() { + return "[\"" + fileId + "\",\"" + accessMode + "\"," + expires + ",\"" + + getClientIdentity() + "\"," + epochNo + ",\"" + signature + "\"]"; + } + + protected String calcSignature() { + + // right now, we use a shared secret between MRC and OSDs + // as soon as we have a Public Key Infrastructure, signatures + // will be generated and checked by means of asymmetric encryption + // techniques + + String plainText = fileId + accessMode + expires + epochNo + sharedSecret; + + try { + MessageDigest md5 = MessageDigest.getInstance("MD5"); + md5.update(plainText.getBytes()); + byte[] digest = md5.digest(); + + return OutputUtils.byteArrayToHexString(digest); + } catch (NoSuchAlgorithmException exc) { + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + return null; + } + } + +} diff --git a/servers/src/org/xtreemfs/common/ClientLease.java b/servers/src/org/xtreemfs/common/ClientLease.java new file mode 100644 index 0000000000000000000000000000000000000000..051e60761a9124dfb7abf982934bce4968a5ce24 --- /dev/null +++ b/servers/src/org/xtreemfs/common/ClientLease.java @@ -0,0 +1,241 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. + */ + +package org.xtreemfs.common; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; + +/** + * + * @author bjko + */ +public final class ClientLease implements Cloneable { + + /** + * Default time span for the client lease validity. + * Must be smaller than a intra-OSD lease, if replication is + * active! + */ + public static final long LEASE_VALIDITY = 15000; + + /** + * Indicates that a lease spans to EOF "append lease". + * a lease from 0 to -1 spans the whole file, even if data is appended. 
+ */
+ public static final long TO_EOF = -1;
+
+ /**
+ * first object number (inclusive) that this lease covers.
+ * (The earlier text "timestamp when the lease expires" was copy-pasted
+ * from the expires field below and did not describe this field.)
+ */
+ private long firstObject;
+ /**
+ * last object the lease is valid for;
+ * TO_EOF (-1) means the lease extends to end-of-file
+ */
+ private long lastObject;
+
+ /**
+ * UUID of the client owning the lease
+ */
+ private String clientId;
+
+ /**
+ * timestamp when the lease expires (in seconds since 01/01/70)
+ * must be XtreemFS global time!
+ */
+ private long expires;
+
+
+ /**
+ * fileId this lease was issued for
+ */
+ private final String fileId;
+
+ /**
+ * sequenceNo, used to generate unique leaseId = fileId+"/"+sequenceNo
+ */
+ private long sequenceNo;
+
+ /**
+ * lease type/operation
+ */
+ private String operation;
+
+ public static final String EXCLUSIVE_LEASE = "w";
+
+
+ public ClientLease(final String fileId) {
+ this.fileId = fileId;
+ }
+
+ /**
+ * Parses a lease from its JSON string encoding.
+ * NOTE(review): generic type parameters appear to have been stripped
+ * from this file (raw Map/List casts below) -- presumably
+ * Map<String, Object> originally; TODO restore.
+ *
+ * @param json JSON object as produced by encodeAsJSON()
+ * @throws JSONException if the string is not a valid Lease object
+ */
+ public static ClientLease parseFromJSON(String json) throws JSONException {
+ try {
+ Map m = (Map) JSONParser.parseJSON(new JSONString(json));
+ return parseFromMap(m);
+ } catch (ClassCastException e) {
+ throw new JSONException("expected a Lease (see the XtreemFS protocol spec) object");
+ }
+ }
+
+ /**
+ * Parses a lease from an RPC argument list whose first element is the
+ * lease map; any further elements are ignored.
+ *
+ * @throws JSONException if the list is empty or the first element is
+ * not a map
+ */
+ public static ClientLease parseFromList(List arguments) throws JSONException {
+ try {
+ final Map m = (Map)arguments.get(0);
+ return parseFromMap(m);
+ } catch (IndexOutOfBoundsException ex) {
+ throw new JSONException("expected lease object");
+ } catch (ClassCastException ex) {
+ throw new JSONException("expected lease object: "+ex);
+ }
+ }
+
+ /**
+ * Builds a ClientLease from a decoded JSON map.
+ * Required fields: fileId, clientId, firstObject, lastObject, operation.
+ * Optional fields: leaseId (sequence number, defaults to 0) and
+ * expires (defaults to 0).
+ *
+ * @throws JSONException if a required field is missing or a field has
+ * an unexpected type
+ */
+ public static ClientLease parseFromMap(Map m) throws JSONException {
+ try {
+ final String fileId = (String) m.get("fileId");
+ if (fileId == null)
+ throw new JSONException("Lease object must have a fileId field");
+ ClientLease l = new ClientLease(fileId);
+
+
+ if (!m.containsKey("clientId"))
+ throw new JSONException("Lease object must have a clientId field");
+
+ String tmp = (String) m.get("clientId");
+ l.setClientId(tmp);
+
+ // leaseId is transmitted as a string; absent means sequence number 0
+ tmp = (String) m.get("leaseId");
+ if (tmp == null)
+ l.setSequenceNo(0);
+ else
+ 
l.setSequenceNo(Long.valueOf(tmp)); + + Long tmp2 = (Long) m.get("firstObject"); + if (tmp2 == null) + throw new JSONException("Lease object must have a firstObject field"); + l.setFirstObject(tmp2); + + tmp2 = (Long) m.get("lastObject"); + if (tmp2 == null) + throw new JSONException("Lease object must have a lastObject field"); + l.setLastObject(tmp2); + + tmp2 = (Long) m.get("expires"); + if (tmp2 == null) + l.setExpires(0); + else + l.setExpires(tmp2); + + tmp = (String) m.get("operation"); + if (tmp == null) + throw new JSONException("Lease object must have an operation field"); + l.setOperation(tmp); + + + return l; + } catch (ClassCastException e) { + throw new JSONException("expected a Lease (see the XtreemFS protocol spec) object"); + } + } + + public Map encodeAsMap() throws JSONException { + Map m = new HashMap(); + m.put("clientId",clientId); + m.put("leaseId",Long.toString(sequenceNo)); + m.put("fileId",fileId); + m.put("firstObject",firstObject); + m.put("lastObject",lastObject); + m.put("expires",expires); + m.put("operation", operation); + return m; + } + + public String encodeAsJSON() throws JSONException { + return JSONParser.writeJSON(encodeAsMap()); + } + + /** + * Checks if two leases have conflicting (i.e. 
overlapping ranges) + * @param other other lease for the same file + * @return true, if there is an overlap in the ranges + */ + public boolean isConflicting(ClientLease other) { + //checks + if ( ((this.lastObject < other.firstObject) && (this.lastObject != TO_EOF)) || + ((other.lastObject < this.firstObject) && (other.lastObject != TO_EOF)) ) { + return false; + } else { + return true; + } + } + + @Override + public ClientLease clone() { + ClientLease l = new ClientLease(this.fileId); + l.clientId = this.clientId; + l.expires = this.expires; + l.firstObject = this.firstObject; + l.lastObject = this.lastObject; + l.operation = this.operation; + l.sequenceNo = this.sequenceNo; + return l; + } + + public long getFirstObject() { + return firstObject; + } + + public void setFirstObject(long firstObject) { + this.firstObject = firstObject; + } + + public long getLastObject() { + return lastObject; + } + + public void setLastObject(long lastObject) { + this.lastObject = lastObject; + } + + public String getClientId() { + return clientId; + } + + public void setClientId(String clientId) { + this.clientId = clientId; + } + + public long getExpires() { + return expires; + } + + public void setExpires(long expires) { + this.expires = expires; + } + + public String getFileId() { + return fileId; + } + + public long getSequenceNo() { + return sequenceNo; + } + + public void setSequenceNo(long sequenceNo) { + this.sequenceNo = sequenceNo; + } + + public String getOperation() { + return operation; + } + + public void setOperation(String operation) { + this.operation = operation; + } + + +} diff --git a/servers/src/org/xtreemfs/common/DualQueue.java b/servers/src/org/xtreemfs/common/DualQueue.java new file mode 100644 index 0000000000000000000000000000000000000000..23227f38c0dbe815397851a1d0690197cdf120c2 --- /dev/null +++ b/servers/src/org/xtreemfs/common/DualQueue.java @@ -0,0 +1,131 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. 
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common;
+
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * A queue with two priority levels: items offered via putHighPriority()
+ * are always drained before items offered via putLowPriority().
+ *
+ * NOTE(review): generic type parameters appear to have been stripped from
+ * this file (LinkedBlockingQueue is used as a raw type) -- TODO restore.
+ *
+ * @author bjko
+ */
+public final class DualQueue {
+
+ private final LinkedBlockingQueue highPriority;
+ private final LinkedBlockingQueue lowPriority;
+
+ // total number of items across both queues; drives the wake-up logic
+ private final AtomicInteger totalQueueLength;
+
+ private final ReentrantLock waitLock;
+
+ private final Condition notEmpty;
+
+ public DualQueue() {
+ highPriority = new LinkedBlockingQueue();
+ lowPriority = new LinkedBlockingQueue();
+ totalQueueLength = new AtomicInteger(0);
+ waitLock = new ReentrantLock();
+ notEmpty = waitLock.newCondition();
+ }
+
+ public void putHighPriority(Object item) {
+ highPriority.add(item);
+ incrementAndWakeup();
+ }
+
+ public void putLowPriority(Object item) {
+ lowPriority.add(item);
+ incrementAndWakeup();
+ }
+
+ // signals waiters only on the 0 -> 1 transition of the counter
+ private void incrementAndWakeup() {
+ if (totalQueueLength.incrementAndGet() == 1) {
+ try {
+ 
waitLock.lock();
+ notEmpty.signalAll();
+ } finally {
+ waitLock.unlock();
+ }
+ }
+ }
+
+ /**
+ * Removes and returns the next item, preferring the high-priority queue;
+ * blocks while both queues are empty.
+ *
+ * NOTE(review): lost-wakeup race -- the emptiness check below runs
+ * BEFORE waitLock is taken, so a producer can increment the counter and
+ * signal between the check and the await(); this thread then blocks even
+ * though an item is available, until the next 0->1 transition. The check
+ * should be re-done in a while loop while holding waitLock.
+ * NOTE(review): with multiple consumers, signalAll() can wake more
+ * threads than there are items; a woken thread that loses the race finds
+ * both queues empty and hits the RuntimeException at the bottom instead
+ * of going back to sleep.
+ */
+ public Object poll() throws InterruptedException {
+
+ if (totalQueueLength.get() == 0) {
+ try {
+ waitLock.lockInterruptibly();
+ notEmpty.await();
+ } finally {
+ waitLock.unlock();
+ }
+ }
+
+ Object item = highPriority.poll();
+ if (item != null) {
+ totalQueueLength.decrementAndGet();
+ return item;
+ }
+ item = lowPriority.poll();
+ if (item != null) {
+ totalQueueLength.decrementAndGet();
+ return item;
+ }
+
+ throw new RuntimeException("totalQueueCount is incorrect (> 0) while all queues are empty!");
+ }
+
+ /**
+ * Like poll(), but waits at most waitTimeInMs and returns null if both
+ * queues are still empty afterwards. A spurious or premature wakeup
+ * simply makes this return null early.
+ *
+ * NOTE(review): the same lost-wakeup and multi-consumer races described
+ * on poll() apply here as well.
+ */
+ public Object poll(long waitTimeInMs) throws InterruptedException {
+
+ if (totalQueueLength.get() == 0) {
+ try {
+ waitLock.lockInterruptibly();
+ notEmpty.await(waitTimeInMs,TimeUnit.MILLISECONDS);
+ } finally {
+ waitLock.unlock();
+ }
+ }
+
+ if (totalQueueLength.get() == 0)
+ return null;
+
+ Object item = highPriority.poll();
+ if (item != null) {
+ totalQueueLength.decrementAndGet();
+ return item;
+ }
+ item = lowPriority.poll();
+ if (item != null) {
+ totalQueueLength.decrementAndGet();
+ return item;
+ }
+
+ throw new RuntimeException("totalQueueCount is incorrect (> 0) while all queues are empty!");
+ }
+
+
+}
diff --git a/servers/src/org/xtreemfs/common/HeartbeatThread.java b/servers/src/org/xtreemfs/common/HeartbeatThread.java
new file mode 100644
index 0000000000000000000000000000000000000000..c8840152094d2a236c896f53df54921fc758015e
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/HeartbeatThread.java
@@ -0,0 +1,301 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin,
+ Barcelona Supercomputing Center - Centro Nacional de Supercomputacion
+ and Consiglio Nazionale delle Ricerche.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC) + * Eugenio Cesario (CNR) + */ + +package org.xtreemfs.common; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.xtreemfs.common.clients.HttpErrorException; +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.config.ServiceConfig; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.NetUtils; +import org.xtreemfs.common.util.OutputUtils; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.foundation.LifeCycleThread; +import org.xtreemfs.foundation.json.JSONException; + +/** + * A thread that regularly sends a heartbeat signal with fresh service data to + * the Directory Service. + */ +public class HeartbeatThread extends LifeCycleThread { + + /** + * An interface that generates service data to be sent to the Directory + * Service. 
Each time a heartbeat signal is sent, new service data will be + * generated by means of invoking getServiceData(). + */ + public interface ServiceDataGenerator { + public Map> getServiceData(); + } + + private static final long UPDATE_INTERVAL = 60 * 1000; // 60s + + private ServiceUUID uuid; + + private ServiceDataGenerator serviceDataGen; + + private DIRClient client; + + private String authString; + + private volatile boolean quit; + + private final ServiceConfig config; + + public HeartbeatThread(String name, DIRClient client, ServiceUUID uuid, + ServiceDataGenerator serviceDataGen, String authString, ServiceConfig config) { + + super(name); + + this.client = client; + this.uuid = uuid; + this.serviceDataGen = serviceDataGen; + this.authString = authString; + this.config = config; + } + + public void shutdown() { + this.quit = true; + this.interrupt(); + try { + if (client.getSpeedy().isAlive()) { + RPCResponse r = client.deregisterEntity(uuid.toString(), authString); + r.waitForResponse(2000); + Logging.logMessage(Logging.LEVEL_INFO, this, uuid + " dergistered"); + } + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, "cannot deregister at DIR: " + ex); + } + } + + public void run() { + + List responses = new LinkedList(); + Map verMap = new HashMap(); + + // initially, ... + try { + + // ... for each UUID, ... + for (Entry> mapEntry : serviceDataGen.getServiceData() + .entrySet()) { + + // ... remove old DS entry if necessary + if (!"volume".equals(mapEntry.getValue().get("type"))) { + RPCResponse r = client.deregisterEntity(mapEntry.getKey(), authString); + r.waitForResponse(); + responses.add(r); + } + + // ... register the entity + registerEntity(mapEntry.getKey(), mapEntry.getValue(), verMap, authString, + responses); + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, uuid + + " successfully registered at Directory Service"); + } + + // ... 
register the address mapping for the service + + List> endpoints = null; + + //check if a listen.address is set + if (config.getAddress() == null) { + endpoints = NetUtils.getReachableEndpoints(uuid.getAddress() + .getPort(), uuid.getProtocol()); + } else { + //if it is set, we should use that for UUID mapping! + endpoints = new ArrayList(1); + Map m = RPCClient.generateMap("address", config.getAddress().toString(), + "port", uuid.getAddress().getPort(), "protocol", uuid.getProtocol(), + "ttl", 3600, "match_network", "*"); + endpoints.add(m); + } + + // fetch the latest address mapping version from the Directory + // Serivce + long version = 0; + RPCResponse r2 = client.getAddressMapping(uuid.toString(), authString); + try { + Map> result = (Map>) r2.get(); + + // retrieve the version number from the address mapping + Collection>> entries = result.entrySet(); + if (entries.size() != 0) { + List valueList = entries.iterator().next().getValue(); + version = (Long) valueList.get(0); + } + } finally { + responses.add(r2); + } + + // register/update the current address mapping + RPCResponse r3 = client.registerAddressMapping(uuid.toString(), endpoints, version, + authString); + try { + r3.waitForResponse(); + } finally { + responses.add(r3); + } + + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, + "an error occurred while initially contacting the Directory Service: " + + OutputUtils.stackTraceToString(ex)); + notifyCrashed(ex); + } finally { + for (RPCResponse resp : responses) + resp.freeBuffers(); + } + + notifyStarted(); + + // periodically, ... + while (!quit) { + + responses.clear(); + + try { + + // ... for each UUID, ... + for (Entry> mapEntry : serviceDataGen.getServiceData() + .entrySet()) { + + // ... 
update the Directory Service entry for the service + registerEntity(mapEntry.getKey(), mapEntry.getValue(), verMap, authString, + responses); + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, uuid + + " successfully updated at Directory Service"); + } + + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } catch (JSONException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } catch (InterruptedException ex) { + quit = true; + break; + } finally { + for (RPCResponse resp : responses) + resp.freeBuffers(); + } + + if (quit) + break; + + try { + Thread.sleep(UPDATE_INTERVAL); + } catch (InterruptedException e) { + // ignore + } + } + + notifyStopped(); + } + + private void registerEntity(String uuid, Map data, + Map versionMap, String authString, List responses) + throws HttpErrorException, InterruptedException, IOException, JSONException { + + Long ver = versionMap.get(uuid); + if (ver == null) + ver = 0L; + + // in case the data object refers to a volume, check whether a volume + // with the same name exists already + if ("volume".equals(data.get("type"))) { + + RPCResponse>> response = client.getEntities(RPCClient + .generateMap("name", data.get("name")), + RPCClient.generateStringList("version"), authString); + + try { + Map> entities = response.get(); + + if (!entities.isEmpty()) { + String id = entities.keySet().iterator().next(); + + if (id != null && !id.equals(uuid)) { + Logging.logMessage(Logging.LEVEL_WARN, this, + "WARNING: could not register volume '" + data.get("name") + + "', as it has already been registered with a different UUID: " + + id + "!"); + return; + + } else + ver = Long.parseLong((String) entities.get(id).get("version")); + } + } finally { + responses.add(response); + } + } + + RPCResponse response = client.registerEntity(uuid, data, ver, authString); + try { + versionMap.put(uuid, response.get()); + } catch (HttpErrorException exc) { + + // if the version number was 
outdated for some reason, fetch the + // latest version of the entity + if (exc.getStatusCode() == 420) { + + RPCResponse>> r = client.getEntities(RPCClient + .generateMap("uuid", uuid), RPCClient.generateStringList("version"), + authString); + try { + Map> result = r.get(); + + // retrieve the version number + Collection>> entries = result.entrySet(); + if (entries.size() != 0) { + Map valueMap = entries.iterator().next().getValue(); + versionMap.put(uuid, Long.valueOf((String) valueMap.get("version"))); + } + + } finally { + responses.add(r); + } + } + + throw exc; + } finally { + responses.add(response); + } + } +} diff --git a/servers/src/org/xtreemfs/common/LRUCache.java b/servers/src/org/xtreemfs/common/LRUCache.java new file mode 100644 index 0000000000000000000000000000000000000000..47a5404aca07f5858f9b8f3c20c8b3ba194db8b7 --- /dev/null +++ b/servers/src/org/xtreemfs/common/LRUCache.java @@ -0,0 +1,49 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC) + */ + +package org.xtreemfs.common; + +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * This class implements a LRU cache + * + * @author jmalo + */ +public class LRUCache extends LinkedHashMap { + private int maximumSize; + + /** Creates a new instance of LRUCache */ + public LRUCache(int size) { + super(size, (float)0.75, true); + + maximumSize = size; + } + + protected boolean removeEldestEntry(Map.Entry eldest) { + return size() > maximumSize; + } +} diff --git a/servers/src/org/xtreemfs/common/PrioritizableMessage.java b/servers/src/org/xtreemfs/common/PrioritizableMessage.java new file mode 100644 index 0000000000000000000000000000000000000000..5e55901eb41b255d310abddaa954236b6a5b9d04 --- /dev/null +++ b/servers/src/org/xtreemfs/common/PrioritizableMessage.java @@ -0,0 +1,36 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC) + */ + +package org.xtreemfs.common; + +/** + * + * @author bjko + */ +public interface PrioritizableMessage extends Comparable { + + public int getMessagePriority(); + +} diff --git a/servers/src/org/xtreemfs/common/Request.java b/servers/src/org/xtreemfs/common/Request.java new file mode 100644 index 0000000000000000000000000000000000000000..2cd8e23d05528eebddf3e322b43d7bd396f1a059 --- /dev/null +++ b/servers/src/org/xtreemfs/common/Request.java @@ -0,0 +1,103 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB), Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ +package org.xtreemfs.common; + +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.osd.ErrorRecord; + +/** + * + * 29.09.2008 + * + * @author clorenz + */ +public abstract class Request { + + /** + * The HTTP request object. + */ + private PinkyRequest pinkyRequest; + + /** + * request id used for tracking. 
+ */ + protected long requestId; + + /** + * error record, if an error occurred + */ + protected ErrorRecord error; + + private Object attachment; + + private long enqueueNanos, finishNanos; + + protected Request(PinkyRequest pr) { + this.setPinkyRequest(pr); + } + + public long getRequestId() { + return requestId; + } + + public ErrorRecord getError() { + return error; + } + + public void setError(ErrorRecord error) { + this.error = error; + } + + public PinkyRequest getPinkyRequest() { + return pinkyRequest; + } + + public void setPinkyRequest(PinkyRequest pinkyRequest) { + this.pinkyRequest = pinkyRequest; + } + + public Object getAttachment() { + return attachment; + } + + public void setAttachment(Object attachment) { + this.attachment = attachment; + } + + public long getEnqueueNanos() { + return enqueueNanos; + } + + public void setEnqueueNanos(long enqueueNanos) { + this.enqueueNanos = enqueueNanos; + } + + public long getFinishNanos() { + return finishNanos; + } + + public void setFinishNanos(long finishNanos) { + this.finishNanos = finishNanos; + } +} diff --git a/servers/src/org/xtreemfs/common/RingBuffer.java b/servers/src/org/xtreemfs/common/RingBuffer.java new file mode 100644 index 0000000000000000000000000000000000000000..bb99a2dfc334051c90e14de3125726b6e9a6c32a --- /dev/null +++ b/servers/src/org/xtreemfs/common/RingBuffer.java @@ -0,0 +1,87 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. 
+ */
+
+package org.xtreemfs.common;
+
+import java.util.Iterator;
+
+/**
+ * Fixed-capacity ring buffer: once all slots are occupied, each insert
+ * overwrites the oldest element. Iteration and toString() visit elements
+ * from oldest to newest and stop at the first empty (null) slot.
+ *
+ * No synchronization is performed; external locking is required for
+ * concurrent use.
+ *
+ * NOTE(review): generic type parameters were stripped from the class
+ * header (the body uses T, so this was presumably
+ * RingBuffer<T> implements Iterable<T>) -- TODO restore; the class does
+ * not compile as declared here.
+ * NOTE(review): the (capacity, initialValue) constructor pre-fills every
+ * slot with a non-null value, so the very first insert() already takes
+ * the "overwriting" branch and advances start -- confirm this is the
+ * intended semantics.
+ *
+ * @author bjko
+ */
+public class RingBuffer implements Iterable {
+
+ protected T[] items;
+ // next slot to write
+ protected int pointer;
+ // oldest element, where iteration begins
+ protected int start;
+
+ public RingBuffer(int capacity) {
+ items = (T[]) new Object[capacity];
+ pointer = 0;
+ start = 0;
+ }
+
+ public RingBuffer(int capacity, T initialValue) {
+ this(capacity);
+ for (int i = 0; i < capacity; i++)
+ items[i] = initialValue;
+ }
+
+ /**
+ * Appends item; if the target slot is occupied (buffer full), the oldest
+ * element is dropped and start advances so iteration still begins at the
+ * oldest surviving element. Null detection is used to recognize
+ * occupancy, so null items must not be inserted.
+ */
+ public void insert(T item) {
+ final T tmp = items[pointer];
+ if (tmp != null) {
+ //overwriting
+ start++;
+ if (start == items.length)
+ start = 0;
+ }
+ items[pointer++] = item;
+ if (pointer == items.length)
+ pointer = 0;
+ }
+
+ /**
+ * Oldest-to-newest iterator; not fail-fast -- concurrent inserts are
+ * not detected. remove() is unsupported.
+ */
+ private class RingBufferIterator implements Iterator {
+
+ private int position;
+
+ public RingBufferIterator() {
+ position = 0;
+ }
+
+ public boolean hasNext() {
+ // stop after one full revolution, or at the first empty slot
+ if (position >= items.length)
+ return false;
+ return items[ (position+start) % items.length] != null;
+ }
+
+ public T next() {
+ // no hasNext() guard: relies on the caller following the
+ // Iterator protocol
+ return (T) items[ ((position++)+start) % items.length];
+ }
+
+ public void remove() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ }
+
+ public Iterator iterator() {
+ return new RingBufferIterator();
+ }
+
+ public String toString() {
+ StringBuilder contents = new StringBuilder();
+ contents.append("[ ");
+ for (int i = 0; i < items.length; i++) {
+ T item = items[ (i+start) % items.length];
+ if (item == null)
+ break;
+ contents.append(item);
+ contents.append(", ");
+ }
+ contents.append("] ");
+ return contents.toString();
+ }
+
+}
diff --git a/servers/src/org/xtreemfs/common/TimeSync.java b/servers/src/org/xtreemfs/common/TimeSync.java
new file mode 100644
index 0000000000000000000000000000000000000000..778c92355cb53fcc704738782f103dbb56d77945
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/TimeSync.java
@@ -0,0 +1,250 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and
+ Barcelona Supercomputing Center -
Centro Nacional de Supercomputacion. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC) + */ + +package org.xtreemfs.common; + +import java.net.InetSocketAddress; + +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; + +/** + * A class that offers a local time w/ adjustable granularity and a global time + * based on the time reported by the DIR. Global time is adjusted periodically. + * This class should be used to minimize the number of calls to + * System.currentTimeMillis which is a costly system call on Linux. Moreover it + * offers a system-global time. + * + * @author bjko + */ +public final class TimeSync extends Thread { + + /** + * A dir client used to synchronize clocks + */ + private final DIRClient dir; + + /** + * interval in ms to wait between to synchronizations. + */ + private final int timeSyncInterval; + + /** + * interval between updates of the local system clock. 
+ */ + private final int localTimeRenew; + + /** + * local sys time as of last update + */ + private volatile long localSysTime; + + /** + * drift between local clock and global time as of last resync() operation. + */ + private volatile long currentDrift; + + /** + * set to true to stop thread + */ + private volatile boolean quit; + + /** + * timestamp of last resync operation + */ + private long lastSync; + + /** + * authentication string sent to the DIR + */ + private final String authStr; + + private static TimeSync theInstance; + + /** Creates a new instance of TimeSync + @dir a directory server to use for synchronizing clocks, can be null for test setups only + */ + private TimeSync(DIRClient dir, int timeSyncInterval, int localTimeRenew, + String dirAuthStr) { + super("TimeSync Thread"); + setDaemon(true); + this.localTimeRenew = localTimeRenew; + this.timeSyncInterval = timeSyncInterval; + this.dir = dir; + this.authStr = dirAuthStr; + TimeSync.theInstance = this; + } + + /** + * main loop + */ + @Override + public void run() { + while (!quit) { + localSysTime = System.currentTimeMillis(); + if (localSysTime - lastSync > timeSyncInterval) { + resync(); + } + try { + TimeSync.sleep(localTimeRenew); + } catch (InterruptedException ex) { + } + + } + theInstance = null; + } + + /** + * Initializes the time synchronizer. Note that only the first invocation of + * this method has an effect, any further invocations will be ignored. 
+ * + * @param dir + * @param timeSyncInterval + * @param localTimeRenew + * @param dirAuthStr + */ + public static void initialize(DIRClient dir, int timeSyncInterval, + int localTimeRenew, String dirAuthStr) { + + if (theInstance != null) + return; + + TimeSync s = new TimeSync(dir, timeSyncInterval, localTimeRenew, + dirAuthStr); + s.start(); + } + + public static void close() { + if (theInstance == null) + return; + theInstance.shutdown(); + } + + /** + * stop the thread + */ + public void shutdown() { + quit = true; + this.interrupt(); + } + + /** + * returns the current value of the local system time variable. Has a + * resolution of localTimeRenew ms. + */ + public static long getLocalSystemTime() { + return getInstance().localSysTime; + } + + /** + * returns the current value of the local system time adjusted to global + * time. Has a resolution of localTimeRenew ms. + */ + public static long getGlobalTime() { + return getInstance().localSysTime + getInstance().currentDrift; + } + + public static long getLocalRenewInterval() { + return getInstance().localTimeRenew; + } + + public static int getTimeSyncInterval() { + return getInstance().timeSyncInterval; + } + + /** + * returns the current clock drift. 
+ */ + public long getDrift() { + return this.currentDrift; + } + + /** + * resynchronizes with the global time obtained from the DIR + */ + private void resync() { + if (dir == null) + return; + try { + long tStart = localSysTime; + + long oldDrift = currentDrift; + RPCResponse r = dir.getGlobalTime(authStr); + Long globalTime = r.get(); + r.freeBuffers(); + long tEnd = System.currentTimeMillis(); + // add half a roundtrip to estimate the delay + globalTime += (tEnd - tStart) / 2; + + currentDrift = globalTime - tEnd; + lastSync = tEnd; + + if (Math.abs(oldDrift - currentDrift) > 5000) { + Logging.logMessage(Logging.LEVEL_ERROR, this, + "STRANGE DRIFT CHANGE from " + oldDrift + " to " + + currentDrift); + } + + } catch (Exception ex) { + ex.printStackTrace(); + lastSync = System.currentTimeMillis(); + } + } + + public static TimeSync getInstance() { + if (theInstance == null) + throw new RuntimeException("TimeSync not initialized!"); + return theInstance; + } + + /** + * Simple demonstration routine + */ + public static void main(String[] args) { + try { + // simple test + Logging.start(Logging.LEVEL_INFO); + + DIRClient dir = new DIRClient(null, new InetSocketAddress( + "xtreem.zib.de", 32638)); + TimeSync ts = new TimeSync(dir, 1000, 50, NullAuthProvider.createAuthString("me", "me")); + ts.start(); + + for (;;) { + Logging.logMessage(Logging.LEVEL_INFO, null, "local time = " + + ts.getLocalSystemTime()); + Logging.logMessage(Logging.LEVEL_INFO, null, "global time = " + + ts.getGlobalTime() + " +" + ts.getDrift()); + Thread.sleep(1000); + } + + } catch (Exception ex) { + ex.printStackTrace(); + } + } + +} diff --git a/servers/src/org/xtreemfs/common/VersionManagement.java b/servers/src/org/xtreemfs/common/VersionManagement.java new file mode 100644 index 0000000000000000000000000000000000000000..fc2c1a2d929736eee48b194f2dd0c62e58c6e6a4 --- /dev/null +++ b/servers/src/org/xtreemfs/common/VersionManagement.java @@ -0,0 +1,90 @@ +/* Copyright (c) 2008 
Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC) + */ + +package org.xtreemfs.common; + +import java.util.List; + +/** + * This class is meant to maintain version numbers for different components used + * in XtreemFS, in order to be able to detect possible incompatibilities between + * different versions. + * + * When a new version of the protocol, database, etc. has been implemented, the + * corresponding version number should be replaced. XtreemFS will rely on this + * class to find out what the current version numbers are. 
+ * + */ +public class VersionManagement { + + private static final long[] supportedProtocolVersions = { 39 }; + + private static final long mrcDataVersion = 2; + + private static final long osdDataVersion = 1; + + public static long getMatchingProtVers(List proposedVersions) { + + int i = 0; + int j = 0; + long result = -1; + + // find the largest element contained in both lists + if (proposedVersions.size() > 0) { + + while (i < proposedVersions.size() && j < supportedProtocolVersions.length) { + long diff = proposedVersions.get(i) - supportedProtocolVersions[j]; + + if (diff == 0) { + result = supportedProtocolVersions[j]; + break; + } else if (diff > 0) + i++; + else + j++; + } + } + + return result; + } + + public static String getSupportedProtVersAsString() { + String str = "["; + for (int v = 0; v < supportedProtocolVersions.length; v++) + str += supportedProtocolVersions[v] + + (v == supportedProtocolVersions.length - 1 ? "]" : ", "); + + return str; + } + + public static long getMrcDataVersion() { + return mrcDataVersion; + } + + public static long getOsdDataVersion() { + return osdDataVersion; + } + +} diff --git a/servers/src/org/xtreemfs/common/auth/AuthenticationException.java b/servers/src/org/xtreemfs/common/auth/AuthenticationException.java new file mode 100644 index 0000000000000000000000000000000000000000..58f6afed3eb8c9665fdcc133d0cc2bc6dacac533 --- /dev/null +++ b/servers/src/org/xtreemfs/common/auth/AuthenticationException.java @@ -0,0 +1,40 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Bjoern Kolbeck (ZIB) + */ +package org.xtreemfs.common.auth; + +/** + * Thrown by an authentication provide when authentication is not + * possible for any reason. + * @author bjko + */ +public class AuthenticationException extends Exception { + + /** creates a new exception. + * + * @param msg an error message that should be meaningful to users! + */ + public AuthenticationException(String msg) { + super(msg); + } +} diff --git a/servers/src/org/xtreemfs/common/auth/AuthenticationProvider.java b/servers/src/org/xtreemfs/common/auth/AuthenticationProvider.java new file mode 100644 index 0000000000000000000000000000000000000000..0744a4a5c2a96a9e0eb4f0252e33b3f07800f6c1 --- /dev/null +++ b/servers/src/org/xtreemfs/common/auth/AuthenticationProvider.java @@ -0,0 +1,52 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Bjoern Kolbeck (ZIB) + */ + +package org.xtreemfs.common.auth; + +import org.xtreemfs.foundation.pinky.channels.ChannelIO; + +/** + * Authentication Providers extract the credentials (UID/GIDs/SuperUser) + * from the authentication header and the certificates. + * @author bjko + */ +public interface AuthenticationProvider { + + /** initializes the provider class + * + * @param useSSL true, if SSL is enabled. + * @throws java.lang.RuntimeException if the provider cannot be initialized. + */ + void initialize(boolean useSSL) throws RuntimeException; + + /** + * Get the effective credentials for an operation. + * @param authHeader content of the Authentication header sent by the client + * @param channel the channel used, can be used to store attachments and to get certificates + * @return the effective user credentials + * @throws org.xtreemfs.common.auth.AuthenticationException if authentication is not possible + */ + UserCredentials getEffectiveCredentials(String authHeader, ChannelIO channel) throws AuthenticationException; + +} diff --git a/servers/src/org/xtreemfs/common/auth/BackwdCompatNullAuthProvider.java b/servers/src/org/xtreemfs/common/auth/BackwdCompatNullAuthProvider.java new file mode 100644 index 0000000000000000000000000000000000000000..14b4c60be77acf3b14e939b04706350b43612fa2 --- /dev/null +++ b/servers/src/org/xtreemfs/common/auth/BackwdCompatNullAuthProvider.java @@ -0,0 +1,106 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. +This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based +Grid Operating System, see for more details. 
+The XtreemOS project has been developed with the financial support of the +European Commission's IST program under contract #FP6-033576. +XtreemFS is free software: you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation, either version 2 of the License, or (at your option) +any later version. +XtreemFS is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. +You should have received a copy of the GNU General Public License +along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Bjoern Kolbeck (ZIB) + */ +package org.xtreemfs.common.auth; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.StringTokenizer; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.channels.ChannelIO; +import org.xtreemfs.common.auth.AuthenticationException; +import org.xtreemfs.common.auth.AuthenticationProvider; +import org.xtreemfs.common.auth.UserCredentials; + +/** + * A simple provider that parses the JSON string sent in the authentication header + * as described in the protocol spec. 
+ * @author bjko + */ +public class BackwdCompatNullAuthProvider implements AuthenticationProvider { + + public BackwdCompatNullAuthProvider() { + + } + + public UserCredentials getEffectiveCredentials(String authHeader, ChannelIO channel) + throws AuthenticationException { + + if (authHeader.startsWith("{")) { + //new JSON header format + + String GUID = null; + List GGIDs = null; + String mech = null; + try { + //parse the JSON string in header field + JSONString authStr = new JSONString(authHeader); + Map authInfo = (Map) JSONParser.parseJSON(authStr); + mech = (String) authInfo.get("mechanism"); + GUID = (String) authInfo.get("guid"); + GGIDs = (List) authInfo.get("ggids"); + } catch (Exception ex) { + throw new AuthenticationException("malformed authentication credentials: " + ex); + } + + if (!mech.equals("nullauth")) { + throw new AuthenticationException("unknown authorization mechanism: " + mech); + } + + return new UserCredentials(GUID, GGIDs, GUID.equals("root")); + } else { + String GUID = null; + List GGIDs = null; + //old header format for comapatability! 
+ StringTokenizer st = new StringTokenizer( + authHeader, " "); + String mech = st.nextToken(); + + if (mech.equals("nullauth")) { + + if (!st.hasMoreTokens()) { + throw new AuthenticationException("nullauth: user ID required"); + } + + // set the user ID + GUID = st.nextToken(); + + if (!st.hasMoreTokens()) { + throw new AuthenticationException("nullauth: at least one group ID required"); + } + + // set the group IDs + GGIDs = new ArrayList(); + while (st.hasMoreTokens()) { + GGIDs.add(st.nextToken()); + } + + return new UserCredentials(GUID, GGIDs, GUID.equals("root")); + } else { + throw new AuthenticationException("unknown authorization mechanism: " + mech); + } + + } + } + + public void initialize(boolean useSSL) throws RuntimeException { + } +} diff --git a/servers/src/org/xtreemfs/common/auth/NullAuthProvider.java b/servers/src/org/xtreemfs/common/auth/NullAuthProvider.java new file mode 100644 index 0000000000000000000000000000000000000000..daf50f3faef162cd928a834e3c12e594f50c497b --- /dev/null +++ b/servers/src/org/xtreemfs/common/auth/NullAuthProvider.java @@ -0,0 +1,112 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Bjoern Kolbeck (ZIB) + */ + +package org.xtreemfs.common.auth; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.channels.ChannelIO; + +/** + * A simple provider that parses the JSON string sent in the authentication + * header as described in the protocol spec. + * + * @author bjko + */ +public class NullAuthProvider implements AuthenticationProvider { + + public NullAuthProvider() { + + } + + public UserCredentials getEffectiveCredentials(String authHeader, ChannelIO channel) + throws AuthenticationException { + String GUID = null; + List GGIDs = null; + String mech = null; + try { + // parse the JSON string in header field + JSONString authStr = new JSONString(authHeader); + Map authInfo = (Map) JSONParser.parseJSON(authStr); + mech = (String) authInfo.get("mechanism"); + GUID = (String) authInfo.get("guid"); + GGIDs = (List) authInfo.get("ggids"); + } catch (Exception ex) { + throw new AuthenticationException("malformed authentication credentials: " + ex); + } + + if (!mech.equals("nullauth")) + throw new AuthenticationException("unknown authorization mechanism: " + mech); + + return new UserCredentials(GUID, GGIDs, GUID.equals("root")); + + } + + public void initialize(boolean useSSL) throws RuntimeException { + } + + /** + * Generates a NullAuthProvicer-specific authorization string. 
+ * + * @param guid + * the global user ID + * @param ggid + * a list of global group IDs + * @return an authorization string + * @throws JSONException + */ + public static String createAuthString(String guid, List ggid) throws JSONException { + StringBuilder sb = new StringBuilder(); + sb.append("{\"mechanism\":"); + sb.append(JSONParser.writeJSON("nullauth")); + sb.append(",\"guid\":"); + sb.append(JSONParser.writeJSON(guid)); + sb.append(",\"ggids\":"); + sb.append(JSONParser.writeJSON(ggid)); + sb.append("}"); + return sb.toString(); + } + + /** + * Generates a NullAuthProvicer-specific authorization string. + * + * @param guid + * the global user ID + * @param ggid + * the global group ID + * @return an authorization string + * @throws JSONException + */ + public static String createAuthString(String guid, String ggid) throws JSONException { + ArrayList ggids = new ArrayList(1); + ggids.add(ggid); + return createAuthString(guid, ggids); + } + +} diff --git a/servers/src/org/xtreemfs/common/auth/SimpleX509AuthProvider.java b/servers/src/org/xtreemfs/common/auth/SimpleX509AuthProvider.java new file mode 100644 index 0000000000000000000000000000000000000000..cd274010d87e05aaa612da1f4c3a5ced9242005b --- /dev/null +++ b/servers/src/org/xtreemfs/common/auth/SimpleX509AuthProvider.java @@ -0,0 +1,113 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Bjoern Kolbeck (ZIB) + */ +package org.xtreemfs.common.auth; + +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.List; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.pinky.channels.ChannelIO; + +/** + * authentication provider for XOS certificates. + * @author bjko + */ +public class SimpleX509AuthProvider implements AuthenticationProvider { + + private NullAuthProvider nullAuth; + + public UserCredentials getEffectiveCredentials(String authHeader, ChannelIO channel) throws AuthenticationException { + //use cached info! 
+ assert(nullAuth != null); + if (channel.getAttachment() != null) { + Logging.logMessage(Logging.LEVEL_DEBUG,this,"using attachment..."); + final Object[] cache = (Object[])channel.getAttachment(); + final Boolean serviceCert = (Boolean)cache[0]; + if (serviceCert) { + Logging.logMessage(Logging.LEVEL_DEBUG,this,"service cert..."); + return nullAuth.getEffectiveCredentials(authHeader, channel); + } else { + Logging.logMessage(Logging.LEVEL_DEBUG,this,"using cached creds: "+cache[1]); + return (UserCredentials)cache[1]; + } + } + //parse cert if no cached info is present + try { + final Certificate[] certs = channel.getCerts(); + if (certs.length > 0) { + final X509Certificate cert = ((X509Certificate) certs[0]); + String fullDN = cert.getSubjectX500Principal().getName(); + String commonName = getNameElement(cert.getSubjectX500Principal().getName(),"CN"); + + if (commonName.startsWith("host/") || commonName.startsWith("xtreemfs-service/")) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "X.509-host cert present"); + channel.setAttachment(new Object[]{new Boolean(true)}); + //use NullAuth in this case to parse JSON header + return nullAuth.getEffectiveCredentials(authHeader, null); + } else { + + final String globalUID = fullDN; + final String globalGID = getNameElement(cert.getSubjectX500Principal().getName(),"OU"); + List gids = new ArrayList(1); + gids.add(globalGID); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "X.509-User cert present: " + globalUID + "," + globalGID); + + boolean isSuperUser = gids.contains("xtreemfs-admin"); + final UserCredentials creds = new UserCredentials(globalUID, gids, isSuperUser); + channel.setAttachment(new Object[]{new Boolean(false),creds}); + return creds; + } + } else { + throw new AuthenticationException("no X.509-certificates present"); + } + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + throw new AuthenticationException("invalid credentials "+ex); + } + + } + + private String 
getNameElement(String principal, String element) { + String[] elems = principal.split(","); + for (String elem: elems) { + String[] kv = elem.split("="); + if (kv.length != 2) + continue; + if (kv[0].equals(element)) + return kv[1]; + } + return null; + } + + public void initialize(boolean useSSL) throws RuntimeException { + if (!useSSL) { + throw new RuntimeException(this.getClass().getName() + " can only be used if use_ssl is enabled!"); + } + nullAuth = new NullAuthProvider(); + nullAuth.initialize(useSSL); + } +} diff --git a/servers/src/org/xtreemfs/common/auth/UserCredentials.java b/servers/src/org/xtreemfs/common/auth/UserCredentials.java new file mode 100644 index 0000000000000000000000000000000000000000..98aec3fa01d15b72910eb412d1f9b36e9a57841e --- /dev/null +++ b/servers/src/org/xtreemfs/common/auth/UserCredentials.java @@ -0,0 +1,69 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Bjoern Kolbeck (ZIB) + */ + +package org.xtreemfs.common.auth; + +import java.util.List; + +/** + * User credentials. 
+ * @author bjko + */ +public class UserCredentials { + protected String userID; + protected List groupIDs; + protected boolean superUser; + + public UserCredentials(String userID,List groupIDs, boolean superUser) { + this.userID = userID; + this.groupIDs = groupIDs; + this.superUser = superUser; + } + + public String getUserID() { + return userID; + } + + public void setUserID(String userID) { + this.userID = userID; + } + + public List getGroupIDs() { + return groupIDs; + } + + public void setGroupIDs(List groupIDs) { + this.groupIDs = groupIDs; + } + + public boolean isSuperUser() { + return superUser; + } + + public void setSuperUser(boolean superUser) { + this.superUser = superUser; + } + + +} diff --git a/servers/src/org/xtreemfs/common/buffer/ASCIIString.java b/servers/src/org/xtreemfs/common/buffer/ASCIIString.java new file mode 100644 index 0000000000000000000000000000000000000000..22a39f951f2faebb63edfa23a45330d23fd58387 --- /dev/null +++ b/servers/src/org/xtreemfs/common/buffer/ASCIIString.java @@ -0,0 +1,115 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.common.buffer; + +import java.io.Serializable; + +/** + * + * @author bjko + */ +public final class ASCIIString implements Serializable { + + private byte[] data; + + private int hash; + + protected ASCIIString() { + + } + + /** + * Creates a new instance of ASCIIString + */ + public ASCIIString(String str) { + this.data = str.getBytes(); + } + + /** + * Creates a new instance of ASCIIString + */ + protected ASCIIString(byte[] data) { + this.data = data; + } + + public String toString() { + return new String(data); + } + + public char charAt(int index) { + + return (char)data[index]; + } + + private byte unckeckedGetByte(int index) { + return data[index]; + } + + public boolean equals(Object o) { + if (o == null) return false; + try { + ASCIIString other = (ASCIIString)o; + + for (int i = 0; i < data.length; i++) { + if (this.unckeckedGetByte(i) != other.unckeckedGetByte(i)) + return false; + } + return true; + } catch (ClassCastException ex) { + return false; + } + } + + public void marshall(ReusableBuffer target) { + target.putInt(data.length); + target.put(data); + + } + + public static ASCIIString unmarshall(ReusableBuffer target) { + + int length = target.getInt(); + if (length < 0) + return null; + byte[] tmp = new byte[length]; + + target.get(tmp); + + return new ASCIIString(tmp); + } + + public int hashCode() { + int h = hash; + if (h == 0) { + + for (int i = 0; i < data.length; i++) { + h = 31*h + data[i]; + } + hash = h; + } + return h; + } + +} diff --git a/servers/src/org/xtreemfs/common/buffer/BufferConversionUtils.java b/servers/src/org/xtreemfs/common/buffer/BufferConversionUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..e2f48d7ee215dad0522647c6faf5be8e667770e3 --- /dev/null +++ b/servers/src/org/xtreemfs/common/buffer/BufferConversionUtils.java @@ -0,0 +1,60 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik 
Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.common.buffer; + +import java.nio.ByteBuffer; + +/** This class contains some convenience methods for very diverses uses + * + * @author Jesus Malo (jmalo) + */ +public class BufferConversionUtils { + + /** + * Creates a new instance of BufferConversionUtils + */ + public BufferConversionUtils() { + } + + /** It gets the array of bytes of a ByteBuffer + * @param source The object containing the require array of bytes + * @return The array of bytes contained in the given ByteBuffer + */ + public static byte [] arrayOf(ByteBuffer source) { + byte [] array; + + if (source.hasArray()) { + array = source.array(); + } else { + array = new byte[source.capacity()]; + source.position(0); + source.get(array); + } + + return array; + } + + +} diff --git a/servers/src/org/xtreemfs/common/buffer/BufferPool.java b/servers/src/org/xtreemfs/common/buffer/BufferPool.java new file mode 100644 index 0000000000000000000000000000000000000000..28704391ce05b7d79f2c3c21f5de4c3a17b7be13 --- /dev/null +++ 
b/servers/src/org/xtreemfs/common/buffer/BufferPool.java @@ -0,0 +1,264 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.common.buffer; + +import java.nio.ByteBuffer; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * A concurrent pool for buffer recycling. + * @author bjko + */ +public final class BufferPool { + + /** size of buffers for each class. + */ + public static final int[] BUFF_SIZES = { 8192, 65536, 524288, 2097152 }; + + /** max pool size for each class + */ + public static final int[] MAX_POOL_SIZES = { 2000, 6, 10, 5 }; + + /** queues to store buffers in + */ + private final ConcurrentLinkedQueue[] pools; + + /** pool sizes to avoid counting elements on each access + */ + private final AtomicInteger[] poolSizes; + + /** stats for num requests and creates of buffers per class + */ + private long[] requests, creates, deletes; + + /** singleton pattern. 
+ */ + private static final BufferPool instance = new BufferPool(); + + /** + * if true all allocate/free operations record the stack trace. + * Useful to find memory leaks but slow. + */ + protected static final boolean recordStackTraces = false; + + /** + * Creates a new instance of BufferPool + */ + private BufferPool() { + pools = new ConcurrentLinkedQueue[BUFF_SIZES.length]; + requests = new long[BUFF_SIZES.length+1]; + creates = new long[BUFF_SIZES.length]; + deletes = new long[BUFF_SIZES.length+1]; + poolSizes = new AtomicInteger[BUFF_SIZES.length]; + for (int i = 0; i < BUFF_SIZES.length; i++) { + pools[i] = new ConcurrentLinkedQueue(); + poolSizes[i] = new AtomicInteger(0); + } + } + + /** Get a new buffer. The Buffer is taken from the pool or created if none + * is available or the size exceedes the largest class. + * @param size the buffer's size in bytes + * @return a buffer of requested size + * @throws OutOfMemoryError if a buffer cannot be allocated + */ + public static ReusableBuffer allocate(int size) { + ReusableBuffer tmp = instance.getNewBuffer(size); + + if (recordStackTraces) { + try { + throw new Exception("allocate stack trace"); + } catch (Exception e) { + tmp.allocStack = "\n"; + for (StackTraceElement elem : e.getStackTrace()) + tmp.allocStack += elem.toString()+"\n"; + } + } + return tmp; + } + + /** Returns a buffer to the pool, if the buffer is reusable. Other + * buffers are ignored. + * @param buf the buffer to return + */ + public static void free(ReusableBuffer buf) { + if (buf != null) { + instance.returnBuffer(buf); + } + } + + /** Returns a buffer which has at least size bytes. + * @attention The returned buffer can be larger than requested! 
+ */ + private ReusableBuffer getNewBuffer(int size) { + try { + ByteBuffer buf = null; + + if (size <= BUFF_SIZES[0]) { + buf = pools[0].poll(); + if (buf == null) { + buf = ByteBuffer.allocateDirect(BUFF_SIZES[0]); + creates[0]++; + } else { + poolSizes[0].decrementAndGet(); + } + requests[0]++; + return new ReusableBuffer(buf,size); + } else if (size <= BUFF_SIZES[1]) { + buf = pools[1].poll(); + if (buf == null) { + buf = ByteBuffer.allocateDirect(BUFF_SIZES[1]); + creates[1]++; + } else { + poolSizes[1].decrementAndGet(); + } + requests[1]++; + return new ReusableBuffer(buf,size); + } else if (size <= BUFF_SIZES[2]) { + buf = pools[2].poll(); + if (buf == null) { + buf = ByteBuffer.allocateDirect(BUFF_SIZES[2]); + creates[2]++; + } else { + poolSizes[2].decrementAndGet(); + } + requests[2]++; + return new ReusableBuffer(buf,size); + } else if (size <= BUFF_SIZES[3]) { + buf = pools[3].poll(); + if (buf == null) { + buf = ByteBuffer.allocateDirect(BUFF_SIZES[3]); + creates[3]++; + } else { + poolSizes[3].decrementAndGet(); + } + requests[3]++; + return new ReusableBuffer(buf,size); + } else { + requests[4]++; + buf = ByteBuffer.allocateDirect(size); + return new ReusableBuffer(buf,size); + } + } catch (OutOfMemoryError ex) { + System.out.println(this.getStatus()); + throw ex; + } + } + + /** return a buffer to the pool + */ + private void returnBuffer(ReusableBuffer buffer) { + if (!buffer.isReusable()) + return; + + if (buffer.viewParent != null) { + // view buffer + if (recordStackTraces) { + try { + throw new Exception("free stack trace"); + } catch (Exception e) { + buffer.freeStack = "\n"; + for (StackTraceElement elem : e.getStackTrace()) + buffer.freeStack += elem.toString()+"\n"; + } + } + assert(!buffer.returned) : "buffer was already released: "+buffer.freeStack; + buffer.returned = true; + returnBuffer(buffer.viewParent); + + } else { + + if (buffer.refCount.getAndDecrement() > 1) { + return; + } + + assert(!buffer.returned) : "buffer was already 
released: "+buffer.freeStack; + buffer.returned = true; + + + if (recordStackTraces) { + try { + throw new Exception("free stack trace"); + } catch (Exception e) { + buffer.freeStack = "\n"; + for (StackTraceElement elem : e.getStackTrace()) + buffer.freeStack += elem.toString()+"\n"; + } + } + + ByteBuffer buf = buffer.getParent(); + + buf.clear(); + if (buf.capacity() == BUFF_SIZES[0]) { + if (poolSizes[0].get() < MAX_POOL_SIZES[0]) { + poolSizes[0].incrementAndGet(); + pools[0].add(buf); + } else { + deletes[0]++; + } + } else if (buf.capacity() == BUFF_SIZES[1]) { + if (poolSizes[1].get() < MAX_POOL_SIZES[1]) { + poolSizes[1].incrementAndGet(); + pools[1].add(buf); + } else { + deletes[1]++; + } + } else if (buf.capacity() == BUFF_SIZES[2]) { + if (poolSizes[2].get() < MAX_POOL_SIZES[2]) { + poolSizes[2].incrementAndGet(); + pools[2].add(buf); + } else { + deletes[2]++; + } + } else if (buf.capacity() == BUFF_SIZES[3]) { + if (poolSizes[3].get() < MAX_POOL_SIZES[3]) { + poolSizes[3].incrementAndGet(); + pools[3].add(buf); + } else { + deletes[3]++; + } + } else { + deletes[4]++; + } + } + } + + /** Returns a textual representation of the pool status. + * @return a textual representation of the pool status. 
+ */ + public static String getStatus() { + + String str = ""; + for (int i = 0; i < 4; i++) { + str += String.format("%8d: poolSize = %5d numRequests = %8d creates = %8d deletes = %8d\n", + instance.BUFF_SIZES[i], instance.poolSizes[i].get(), + instance.requests[i], instance.creates[i], instance.deletes[i]); + } + str += String.format("unpooled (> %8d) numRequests = creates = %8d deletes = %8d",instance.BUFF_SIZES[3],instance.requests[4],instance.deletes[4]); + return str; + } + +} diff --git a/servers/src/org/xtreemfs/common/buffer/ReusableBuffer.java b/servers/src/org/xtreemfs/common/buffer/ReusableBuffer.java new file mode 100644 index 0000000000000000000000000000000000000000..6dfdfeac1a873e4e9ca70eace4a20706691de903 --- /dev/null +++ b/servers/src/org/xtreemfs/common/buffer/ReusableBuffer.java @@ -0,0 +1,590 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.common.buffer; + +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicInteger; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.OutputUtils; +import org.xtreemfs.foundation.pinky.HTTPUtils; + +/** + * + * @author bjko + */ +public final class ReusableBuffer { + + /** A view buffer of parentBuffer with the requested size. + * For non-reusable buffers this is the buffer itself + */ + private ByteBuffer buffer; + + /** A parent buffer which is returned to the pool + */ + private final ByteBuffer parentBuffer; + + /** True if the buffer can be returned to the pool + */ + private final boolean reusable; + + /** set to true after a buffer was returned to the pool + */ + protected volatile boolean returned; + + /** size (as requested), might be smaller than parentBuffer size but + * is always equal to the (view) buffer size. + */ + private int size; + + protected ReusableBuffer viewParent; + + protected String freeStack, allocStack; + + /** + * reference count + */ + AtomicInteger refCount; + + /** Creates a new instance of ReusableBuffer. + * A view buffer of size is created. + * @param buffer the parent buffer + * @param size the requested size + */ + protected ReusableBuffer(ByteBuffer buffer, int size) { + buffer.position(0); + buffer.limit(size); + this.buffer = buffer.slice(); + this.parentBuffer = buffer; + this.size = size; + this.reusable = true; + this.refCount = new AtomicInteger(1); + returned = false; + viewParent = null; + } + + /** A wrapper for a non-reusable buffer. + * The buffer is not used by the pool when returned. 
+ */ + public ReusableBuffer(ByteBuffer nonManaged) { + this.buffer = nonManaged; + this.size = buffer.limit(); + this.reusable = false; + this.parentBuffer = null; + returned = false; + this.refCount = new AtomicInteger(1); + viewParent = null; + } + + /** + * Creates a non-reusable buffer around a byte array. + * Uses the ByteBuffer.wrap method. + * + * @param data the byte arry containing the data + * @return + */ + public static ReusableBuffer wrap(byte[] data) { + return new ReusableBuffer(ByteBuffer.wrap(data)); + } + + public static ReusableBuffer wrap(byte[] data, int offset, int length) { + assert(offset >= 0); + assert(length >= 0); + if (offset+length > data.length) + throw new IllegalArgumentException("offset+length > buffer size ("+offset+"+"+length+" > "+data.length); + ByteBuffer tmp = ByteBuffer.wrap(data); + tmp.position(offset); + tmp.limit(offset+length); + return new ReusableBuffer(tmp.slice()); + } + + /** + * Creates a new view buffer. This view buffer shares the same data (i.e. + * backing byte buffer) but has independet position, limit etc. 
+ */ + public ReusableBuffer createViewBuffer() { + + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + + if (this.viewParent == null) { + + if (parentBuffer == null) { + // wraped buffers + ReusableBuffer view = new ReusableBuffer(this.buffer, this.size); + view.viewParent = this; + + return view; + + } else { + // regular buffer + ReusableBuffer view = new ReusableBuffer(this.parentBuffer, this.size); + view.viewParent = this; + this.refCount.incrementAndGet(); + + if (BufferPool.recordStackTraces) { + try { + throw new Exception("allocate stack trace"); + } catch (Exception e) { + view.allocStack = "\n"; + for (StackTraceElement elem : e.getStackTrace()) + view.allocStack += elem.toString() + "\n"; + } + } + + return view; + } + + } else { + + if (parentBuffer == null) { + // wraped buffers + ReusableBuffer view = new ReusableBuffer(this.buffer, this.size); + view.viewParent = this.viewParent; + + return view; + + } else { + // regular buffer: use the parent to create a view buffer + ReusableBuffer view = new ReusableBuffer(this.buffer, this.size); + view.viewParent = this.viewParent; + this.viewParent.refCount.incrementAndGet(); + + if (BufferPool.recordStackTraces) { + try { + throw new Exception("allocate stack trace"); + } catch (Exception e) { + view.allocStack = "\n"; + for (StackTraceElement elem : e.getStackTrace()) + view.allocStack += elem.toString() + "\n"; + } + } + + return view; + } + } + } + + /** @see java.nio.Buffer#capacity + */ + public int capacity() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return this.size; + } + + /** @see java.nio.ByteBuffer#hasArray + */ + public boolean hasArray() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return buffer.hasArray(); + } + + /** Returns the byte array of the buffer, creating a copy if the buffer is not backed by an array + * @return a byte array with a copy 
of the data + */ + public byte [] array() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + byte [] array; + + if (this.hasArray()) { + array = buffer.array(); + } else { + array = new byte[this.limit()]; + final int oldPos = this.position(); + this.position(0); + this.get(array); + this.position(oldPos); + } + + return array; + } + + /** @see java.nio.Buffer#flip + */ + public void flip() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.flip(); + } + + /** @see java.nio.Buffer#compact + */ + public void compact() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.compact(); + } + + /** @see java.nio.Buffer#limit(int) + */ + public void limit(int l) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.limit(l); + } + + /** @see java.nio.Buffer#limit() + */ + public int limit() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return buffer.limit(); + } + + /** @see java.nio.Buffer#position(int) + */ + public void position(int p) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.position(p); + } + + /** @see java.nio.Buffer#position() + */ + public int position() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return buffer.position(); + } + + /** @see java.nio.Buffer#hasRemaining + */ + public boolean hasRemaining() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return buffer.hasRemaining(); + } + + /** Returns the view buffer encapsulated by this ReusableBuffer. 
+ * @return the view buffer + */ + public ByteBuffer getBuffer() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return this.buffer; + } + + /** Returns true, if this buffer is re-usable and can be returned to the pool. + * @return true, if this buffer is re-usable + */ + public boolean isReusable() { + //assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return this.reusable; + } + + /** Returns the parent buffer. + * @return the parent buffer + */ + protected ByteBuffer getParent() { + return this.parentBuffer; + } + + /** @see java.nio.ByteBuffer#get() + */ + public byte get() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return buffer.get(); + } + + public byte get(int index) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return buffer.get(index); + } + + + /** @see java.nio.ByteBuffer#get(byte[]) + */ + public ReusableBuffer get(byte[] dst) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.get(dst); + return this; + } + + /** @see java.nio.ByteBuffer#get(byte[], int offset, int length) + */ + public ReusableBuffer get(byte[] dst, int offset, int length) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.get(dst, offset, length); + return this; + } + + /** @see java.nio.ByteBuffer#put(byte) + */ + public ReusableBuffer put(byte b) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.put(b); + return this; + } + + /** @see java.nio.ByteBuffer#put(byte[]) + */ + public ReusableBuffer put(byte[] src) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.put(src); + return this; + } + + /** @see java.nio.ByteBuffer#put(ByteBuffer) + */ + public ReusableBuffer put(ByteBuffer src) { 
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.put(src); + return this; + } + + /** Writes the content of src into this buffer. + * @param src the buffer to read from + * @return this ReusableBuffer after reading + * @see java.nio.ByteBuffer#put(ByteBuffer) + */ + public ReusableBuffer put(ReusableBuffer src) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.put(src.buffer); + return this; + } + + /** @see java.nio.ByteBuffer#getInt + */ + public int getInt() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return buffer.getInt(); + } + + /** @see java.nio.ByteBuffer#putInt(int) + */ + public ReusableBuffer putInt(int i) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.putInt(i); + return this; + } + + public long getLong() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return buffer.getLong(); + } + + public ReusableBuffer putLong(long l) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.putLong(l); + return this; + } + + public String getString() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + int length = buffer.getInt(); + if (length > 0) { + byte[] bytes = new byte[length]; + buffer.get(bytes); + return new String(bytes,HTTPUtils.ENC_UTF8); + } else if (length == 0) { + return ""; + } else { + return null; + } + } + + public ReusableBuffer putString(String str) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + if (str != null) { + byte[] bytes = str.getBytes(HTTPUtils.ENC_UTF8); + buffer.putInt(bytes.length); + buffer.put(bytes); + } else { + buffer.putInt(-1); + } + return this; + } + + public ASCIIString getBufferBackedASCIIString() { + assert(!returned) : 
"Buffer was already freed and cannot be used anymore"+this.freeStack; + return ASCIIString.unmarshall(this); + } + + public ReusableBuffer putBufferBackedASCIIString(ASCIIString str) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + if (str != null) { + str.marshall(this); + } else { + buffer.putInt(-1); + } + return this; + } + + public ReusableBuffer putShort(short s) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.putShort(s); + return this; + } + + public short getShort() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return buffer.getShort(); + } + + /** @see java.nio.ByteBuffer#isDirect + */ + public boolean isDirect() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return buffer.isDirect(); + } + + + /** @see java.nio.Buffer#remaining + */ + public int remaining() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return buffer.remaining(); + } + + /** @see java.nio.Buffer#clear + */ + public void clear() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.clear(); + } + + + public byte[] getData() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + byte[] array = new byte[this.position()]; + this.position(0); + this.get(array); + return array; + } + + public void shrink(int newSize) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + if (newSize > size) { + throw new IllegalArgumentException("new size must not be larger than old size"); + } + this.size = newSize; + int oldPos = buffer.position(); + if (oldPos > newSize) + oldPos = 0; + + // save parent position and limit + int position = parentBuffer.position(); + int limit = parentBuffer.limit(); + + parentBuffer.position(0); + 
parentBuffer.limit(newSize); + this.buffer = parentBuffer.slice(); + buffer.position(oldPos); + + // restore parent position and limit + parentBuffer.position(position); + parentBuffer.limit(limit); + } + + + public boolean enlarge(int newSize) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + if (newSize > this.parentBuffer.capacity()) { + return false; + } else { + + this.size = newSize; + int oldPos = buffer.position(); + if (oldPos > newSize) + oldPos = 0; + + // save parent position and limit + int position = parentBuffer.position(); + int limit = parentBuffer.limit(); + + parentBuffer.position(0); + parentBuffer.limit(newSize); + this.buffer = parentBuffer.slice(); + buffer.position(oldPos); + + // restore parent position and limit + parentBuffer.position(position); + parentBuffer.limit(limit); + + return true; + } + } + + public void range(int offset, int length) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + + //useless call! + if ((offset == 0) && (length == this.size)) + return; + + if (offset > size) { + throw new IllegalArgumentException("offset must be < size. offset="+offset+" siz="+size); + } + if (offset+length > size) { + throw new IllegalArgumentException("offset+length must be <= size. size="+size+" offset="+offset+" length="+length); + } + + this.size = length; + + // save parent position and limit + int position = parentBuffer.position(); + int limit = parentBuffer.limit(); + + parentBuffer.position(offset); + parentBuffer.limit(offset+length); + this.buffer = parentBuffer.slice(); + assert(this.buffer.capacity() == length); + + // restore parent position and limit + parentBuffer.position(position); + parentBuffer.limit(limit); + } + + public ReusableBuffer putBoolean(boolean bool) { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + buffer.put(bool ? 
(byte)1 : (byte)0); + return this; + } + + public boolean getBoolean() { + assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack; + return buffer.get() == 1; + } + + public int getRefCount() { + if (this.viewParent == null) { + return this.refCount.get(); + } else { + return this.viewParent.refCount.get(); + } + } + + protected void finalize() { + if (!returned && reusable) { + Logging.logMessage(Logging.LEVEL_ERROR, this, + "buffer was finalized but not freed before! this=" + this); + + String content = new String(this.array()); + + Logging.logMessage(Logging.LEVEL_ERROR, this, "content: " + content); + Logging.logMessage(Logging.LEVEL_ERROR, this, "stacktrace: " + allocStack); + + if (this.viewParent != null) { + Logging.logMessage(Logging.LEVEL_ERROR, this, "view parent: " + this.viewParent); + Logging.logMessage(Logging.LEVEL_ERROR, this, "ref count: " + + this.viewParent.refCount.get()); + } else { + Logging.logMessage(Logging.LEVEL_ERROR, this, "ref count: " + this.refCount.get()); + } + + } + } +} diff --git a/servers/src/org/xtreemfs/common/checksums/ChecksumAlgorithm.java b/servers/src/org/xtreemfs/common/checksums/ChecksumAlgorithm.java new file mode 100644 index 0000000000000000000000000000000000000000..3488769b531f16a61e2eea84670c26aeda6f6d3f --- /dev/null +++ b/servers/src/org/xtreemfs/common/checksums/ChecksumAlgorithm.java @@ -0,0 +1,71 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.common.checksums; + +import java.nio.ByteBuffer; + +/** + * An interface which must be implemented by checksum algorithms for XtreemFS. + * + * 19.08.2008 + * + * @author clorenz + */ +public interface ChecksumAlgorithm extends Cloneable { + /** + * Returns a string that identifies the algorithm, independent of + * implementation details. + * + * @return name of algorithm + */ + public String getName(); + + /** + * Returns checksum value (as Hex-String) and resets the Algorithm. + * + * @return checksum + */ + public String getValue(); + + /** + * Resets checksum to initial value. + * + * @return + */ + public void reset(); + + /** + * Updates checksum with specified data. + * + * @param data + */ + public void update(ByteBuffer data); + + /** + * returns a new instance of the checksum algorithm + * + * @return + */ + public ChecksumAlgorithm clone(); +} diff --git a/servers/src/org/xtreemfs/common/checksums/ChecksumFactory.java b/servers/src/org/xtreemfs/common/checksums/ChecksumFactory.java new file mode 100644 index 0000000000000000000000000000000000000000..c940a977c5ed2a00260b44ada9a6e4dba9ca577c --- /dev/null +++ b/servers/src/org/xtreemfs/common/checksums/ChecksumFactory.java @@ -0,0 +1,165 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.common.checksums; + +import java.security.NoSuchAlgorithmException; +import java.util.HashMap; +import java.util.concurrent.ConcurrentLinkedQueue; + +/** + * A Factory for getting checksum algorithms from checksum provider. Implemented + * as a Singleton. + * + * 19.08.2008 + * + * @author clorenz + */ +public class ChecksumFactory { + /** + * amount of cached instances/algorithm + */ + private static int MAX_CACHE_SIZE = 20; + + private static ChecksumFactory self; + + /** + * Contains all available checksum algorithms (only one instance). + */ + private HashMap algorithms; + + /** + * Contains all known checksum provider + */ + private HashMap knownProvider; + + /** + * Contains cached instances for all available checksum algorithms. + */ + private HashMap> pool; + + /** + * creates a new ChecksumFactory + */ + private ChecksumFactory() { + super(); + this.algorithms = new HashMap(); + this.pool = new HashMap>(); + this.knownProvider = new HashMap(); + } + + /** + * Get the instance of ChecksumFactory. + * + * @return the instance + */ + public static ChecksumFactory getInstance() { + if (self == null) { + self = new ChecksumFactory(); + } + return self; + } + + /** + * Get an instance of a specific checksum algorithm, if supported. 
+ * + * @param name + * of the algorithm + * @return algorithm object or null, if algorithm is not supported + */ + public ChecksumAlgorithm getAlgorithm(String name) + throws NoSuchAlgorithmException { + ConcurrentLinkedQueue cache = pool.get(name); + if (cache == null) + throw new NoSuchAlgorithmException("algorithm " + name + + " not supported"); + + ChecksumAlgorithm algorithm = cache.poll(); + if (algorithm == null) { // cache is empty + return algorithms.get(name).clone(); // create new instance + } else { + return algorithm; // return caches instance + } + } + + /** + * Returns an instance of a specific checksum algorithm for caching. + * + * @param instance + * of the algorithm + */ + public void returnAlgorithm(ChecksumAlgorithm algorithm) { + ConcurrentLinkedQueue cache = pool.get(algorithm + .getName()); + if (cache.size() < MAX_CACHE_SIZE) { + algorithm.reset(); + cache.add(algorithm); + } + } + + /** + * Adds a new provider to factory and adds all supported algorithms from the + * provider to the algorithms-list. NOTE: Existing algorithms will be + * overridden when the new provider contains the same algorithm (maybe + * another implementation). + * + * @param provider + */ + public void addProvider(ChecksumProvider provider) { + knownProvider.put(provider.getName(), provider); + for (ChecksumAlgorithm algorithm : provider.getSupportedAlgorithms()) { + addAlgorithm(algorithm); + } + } + + /** + * Adds a new Algorithm to factory. NOTE: The same existing algorithm will + * be overridden. + * + * @param algorithm + */ + public void addAlgorithm(ChecksumAlgorithm algorithm) { + algorithms.put(algorithm.getName(), algorithm); + pool.put(algorithm.getName(), + new ConcurrentLinkedQueue()); + } + + /** + * Removes a provider, but not the added algorithms. + * + * @param provider + */ + public void removeProvider(ChecksumProvider provider) { + knownProvider.remove(provider.getName()); + } + + /** + * Removes an algorithm. 
+ * + * @param algorithm + */ + public void removeAlgorithm(String algorithm) { + algorithms.remove(algorithm); + pool.remove(algorithm); + } +} diff --git a/servers/src/org/xtreemfs/common/checksums/ChecksumProvider.java b/servers/src/org/xtreemfs/common/checksums/ChecksumProvider.java new file mode 100644 index 0000000000000000000000000000000000000000..0852e7adc58733312139bff2bbc61c8f4961a37b --- /dev/null +++ b/servers/src/org/xtreemfs/common/checksums/ChecksumProvider.java @@ -0,0 +1,72 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.common.checksums; + +import java.util.Collection; +import java.util.HashMap; + +/** + * An abstract class which must be implemented by a checksum provider for + * XtreemFS. + * + * 19.08.2008 + * + * @author clorenz + */ +public abstract class ChecksumProvider { + /** + * contains the supported algorithms + */ + protected HashMap algorithms; + + protected ChecksumProvider() { + super(); + this.algorithms = new HashMap(); + } + + /** + * Returns the name of the provider. 
+ * + * @return name + */ + public abstract String getName(); + + /** + * Returns all from this provider supported checksum algorithms. + * + * @return a collection with ChecksumAlgorithms + */ + public Collection getSupportedAlgorithms() { + return algorithms.values(); + } + + /** + * adds an algorithm to the map + * + * @param newAlgorithm + */ + protected void addAlgorithm(ChecksumAlgorithm newAlgorithm) { + this.algorithms.put(newAlgorithm.getName(), newAlgorithm); + } +} diff --git a/servers/src/org/xtreemfs/common/checksums/StringChecksumAlgorithm.java b/servers/src/org/xtreemfs/common/checksums/StringChecksumAlgorithm.java new file mode 100644 index 0000000000000000000000000000000000000000..2f33dd45ae52e4e137cd01e8f8fed94807f410ac --- /dev/null +++ b/servers/src/org/xtreemfs/common/checksums/StringChecksumAlgorithm.java @@ -0,0 +1,41 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.common.checksums; + +/** + * An interface for checksum algorithms, which are based on computations on + * strings. 
+ * + * 02.09.2008 + * + * @author clorenz + */ +public interface StringChecksumAlgorithm extends ChecksumAlgorithm { + /** + * Updates checksum with specified data. + * + * @param data + */ + public void digest(String data); +} diff --git a/servers/src/org/xtreemfs/common/checksums/algorithms/Adler32.java b/servers/src/org/xtreemfs/common/checksums/algorithms/Adler32.java new file mode 100644 index 0000000000000000000000000000000000000000..61a893d298a2390c88f6c9cca84f054e9b8065f0 --- /dev/null +++ b/servers/src/org/xtreemfs/common/checksums/algorithms/Adler32.java @@ -0,0 +1,47 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.common.checksums.algorithms; + +/** + * The Adler32 algorithm. It uses the Java internal implementation. 
+ * + * 19.08.2008 + * + * @author clorenz + */ +public class Adler32 extends JavaChecksumAlgorithm { + public Adler32() { + super(new java.util.zip.Adler32(), "Adler32"); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#clone() + */ + @Override + public Adler32 clone() { + return new Adler32(); + } +} diff --git a/servers/src/org/xtreemfs/common/checksums/algorithms/CRC32.java b/servers/src/org/xtreemfs/common/checksums/algorithms/CRC32.java new file mode 100644 index 0000000000000000000000000000000000000000..7da87a4e3dbd2d9bc81346221379f885fcfbda3a --- /dev/null +++ b/servers/src/org/xtreemfs/common/checksums/algorithms/CRC32.java @@ -0,0 +1,47 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.common.checksums.algorithms; + +/** + * The CRC32 algorithm. It uses the Java internal implementation. 
+ * + * 19.08.2008 + * + * @author clorenz + */ +public class CRC32 extends JavaChecksumAlgorithm { + public CRC32() { + super(new java.util.zip.CRC32(), "CRC32"); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#clone() + */ + @Override + public CRC32 clone() { + return new CRC32(); + } +} diff --git a/servers/src/org/xtreemfs/common/checksums/algorithms/JavaChecksumAlgorithm.java b/servers/src/org/xtreemfs/common/checksums/algorithms/JavaChecksumAlgorithm.java new file mode 100644 index 0000000000000000000000000000000000000000..ea487b41610180ae6a39158c8310788fa1e123f0 --- /dev/null +++ b/servers/src/org/xtreemfs/common/checksums/algorithms/JavaChecksumAlgorithm.java @@ -0,0 +1,114 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.common.checksums.algorithms; + +import java.nio.ByteBuffer; +import java.util.zip.Checksum; + +import org.xtreemfs.common.checksums.ChecksumAlgorithm; + +/** + * An abstract wrapper for Java internal checksums. 
+ * + * 19.08.2008 + * + * @author clorenz + */ +abstract public class JavaChecksumAlgorithm + implements ChecksumAlgorithm { + /** + * the class, which really implements the selected algorithm + */ + protected RealJavaAlgorithm realAlgorithm; + + protected String name; + + public JavaChecksumAlgorithm(RealJavaAlgorithm realAlgorithm, String name) { + super(); + this.realAlgorithm = realAlgorithm; + this.name = name; + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#digest(java.nio.ByteBuffer) + */ + @Override + public void update(ByteBuffer data) { + byte[] array; + + if (data.hasArray()) { + array = data.array(); + } else { + array = new byte[data.capacity()]; + final int oldPos = data.position(); + data.position(0); + data.get(array); + data.position(oldPos); + } + + realAlgorithm.update(array, 0, array.length); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#getName() + */ + @Override + public String getName() { + return name; + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#getValue() + */ + @Override + public String getValue() { + String value = Long.toHexString(realAlgorithm.getValue()); + realAlgorithm.reset(); + return value; + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#reset() + */ + @Override + public void reset() { + realAlgorithm.reset(); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#clone() + */ + @Override + public abstract JavaChecksumAlgorithm clone(); +} diff --git a/servers/src/org/xtreemfs/common/checksums/algorithms/JavaHash.java b/servers/src/org/xtreemfs/common/checksums/algorithms/JavaHash.java new file mode 100644 index 0000000000000000000000000000000000000000..01e5edf1791c676c2211050e47943564837032ba --- /dev/null +++ b/servers/src/org/xtreemfs/common/checksums/algorithms/JavaHash.java @@ -0,0 +1,119 @@ +/* Copyright (c) 2008 
Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.common.checksums.algorithms; + +import java.nio.ByteBuffer; + +import org.xtreemfs.common.checksums.StringChecksumAlgorithm; + +/** + * The Java algorithm, which is used for string.hashCode(). It uses the Java + * internal implementation. + * + * 02.09.2008 + * + * @author clorenz + */ +public class JavaHash implements StringChecksumAlgorithm { + private String hash = null; + + private String name = "Java-Hash"; + + /** + * Updates checksum with specified data. 
+ * + * @param data + */ + public void digest(String data) { + this.hash = Integer.toHexString(data.hashCode()); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#digest(java.nio.ByteBuffer) + */ + @Override + public void update(ByteBuffer data) { + byte[] array; + + if (data.hasArray()) { + array = data.array(); + } else { + array = new byte[data.capacity()]; + final int oldPos = data.position(); + data.position(0); + data.get(array); + data.position(oldPos); + } + + this.hash = Integer.toHexString(new String(array).hashCode()); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#getName() + */ + @Override + public String getName() { + return this.name; + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#getValue() + */ + @Override + public String getValue() { + String value; + if (this.hash != null) + value = this.hash; + else + value = ""; + reset(); + return value; + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#reset() + */ + @Override + public void reset() { + hash = null; + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#clone() + */ + @Override + public JavaHash clone() { + return new JavaHash(); + } +} diff --git a/servers/src/org/xtreemfs/common/checksums/algorithms/JavaMessageDigestAlgorithm.java b/servers/src/org/xtreemfs/common/checksums/algorithms/JavaMessageDigestAlgorithm.java new file mode 100644 index 0000000000000000000000000000000000000000..4e8f84aecd6e378a32eb64d533c051cea1046e6c --- /dev/null +++ b/servers/src/org/xtreemfs/common/checksums/algorithms/JavaMessageDigestAlgorithm.java @@ -0,0 +1,143 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. 
+ The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.common.checksums.algorithms; + +import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; + +import org.xtreemfs.common.checksums.ChecksumAlgorithm; + +/** + * An wrapper for Java internal message digest algorithms. 
+ * + * 01.09.2008 + * + * @author clorenz + */ +public class JavaMessageDigestAlgorithm implements ChecksumAlgorithm { + /** + * the class, which really implements the selected algorithm + */ + protected MessageDigest realAlgorithm; + + protected String name; + + /** + * used for converting the byte-array to a hexString + */ + protected StringBuffer hexString; + + public JavaMessageDigestAlgorithm(String realAlgorithm, String name) + throws NoSuchAlgorithmException { + super(); + this.realAlgorithm = MessageDigest.getInstance(realAlgorithm); + this.name = name; + this.hexString = new StringBuffer(); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#digest(java.nio.ByteBuffer) + */ + @Override + public void update(ByteBuffer data) { + byte[] array; + + if (data.hasArray()) { + array = data.array(); + } else { + array = new byte[data.capacity()]; + final int oldPos = data.position(); + data.position(0); + data.get(array); + data.position(oldPos); + } + + realAlgorithm.update(array, 0, array.length); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#getName() + */ + @Override + public String getName() { + return name; + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#getValue() + */ + @Override + public String getValue() { + return toHexString(realAlgorithm.digest()); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#reset() + */ + @Override + public void reset() { + realAlgorithm.reset(); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#clone() + */ + @Override + public JavaMessageDigestAlgorithm clone() { + try { + return new JavaMessageDigestAlgorithm(this.realAlgorithm + .getAlgorithm(), this.name); + } catch (NoSuchAlgorithmException e) { + // cannot appear, because there is also one instance + return null; + } + } + + /** + * converts a hash to a hex-string + * + * 
@param hash + * @return + */ + protected String toHexString(byte[] hash) { + for (int i = 0; i < hash.length; i++) { + hexString.append(String.format("%02x", 0xFF & hash[i])); // zero-pad: one byte must always yield two hex digits, else digests of equal length collide + } + String checksum = hexString.toString(); + this.hexString.setLength(0); + return checksum; + } + +} diff --git a/servers/src/org/xtreemfs/common/checksums/algorithms/SDBM.java b/servers/src/org/xtreemfs/common/checksums/algorithms/SDBM.java new file mode 100644 index 0000000000000000000000000000000000000000..c2a34feb065ff723bb806cf9d4042ab3a85e9e87 --- /dev/null +++ b/servers/src/org/xtreemfs/common/checksums/algorithms/SDBM.java @@ -0,0 +1,132 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.common.checksums.algorithms; + +import java.nio.ByteBuffer; + +import org.xtreemfs.common.checksums.StringChecksumAlgorithm; + +/** + * The SDBM algorithm.
+ * + * 02.09.2008 + * + * @author clorenz + */ +public class SDBM implements StringChecksumAlgorithm { + private String hash = null; + + private String name = "SDBM"; + + /** + * Updates checksum with specified data. + * + * @param data + */ + public void digest(String data) { + this.hash = sdbmHash(data); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#digest(java.nio.ByteBuffer) + */ + @Override + public void update(ByteBuffer data) { + byte[] array; + + if (data.hasArray()) { + array = data.array(); + } else { + array = new byte[data.capacity()]; + final int oldPos = data.position(); + data.position(0); + data.get(array); + data.position(oldPos); + } + + this.hash = sdbmHash(new String(array)); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#getName() + */ + @Override + public String getName() { + return this.name; + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#getValue() + */ + @Override + public String getValue() { + String value; + if (this.hash != null) + value = this.hash; + else + value = ""; + reset(); + return value; + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#reset() + */ + @Override + public void reset() { + hash = null; + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#clone() + */ + @Override + public SDBM clone() { + return new SDBM(); + } + + /** + * SDBM algorithm + * + * @param str + * @return + */ + protected static String sdbmHash(String str) { + long hash = 0; + for (int c : str.toCharArray()) { + hash = c + (hash << 6) + (hash << 16) - hash; + } + return Long.toHexString(hash); + } +} diff --git a/servers/src/org/xtreemfs/common/checksums/provider/JavaChecksumProvider.java b/servers/src/org/xtreemfs/common/checksums/provider/JavaChecksumProvider.java new file mode 100644 index 
0000000000000000000000000000000000000000..12c2fa20ce47b9690558dae246962a058dc73b21 --- /dev/null +++ b/servers/src/org/xtreemfs/common/checksums/provider/JavaChecksumProvider.java @@ -0,0 +1,71 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.common.checksums.provider; + +import java.security.NoSuchAlgorithmException; + +import org.xtreemfs.common.checksums.ChecksumProvider; +import org.xtreemfs.common.logging.Logging; + +/** + * A provider for Java internal checksums. 
offers the following algorithms: + * Adler32, CRC32, MD5, Java-Hash + * + * 19.08.2008 + * + * @author clorenz + */ +public class JavaChecksumProvider extends ChecksumProvider { + private static String NAME = "Java Checksum Provider"; + + /** + * creates a new JavaChecksumProvider + */ + public JavaChecksumProvider() { + super(); + + addAlgorithm(new org.xtreemfs.common.checksums.algorithms.Adler32()); + addAlgorithm(new org.xtreemfs.common.checksums.algorithms.CRC32()); + try { + addAlgorithm(new org.xtreemfs.common.checksums.algorithms.JavaMessageDigestAlgorithm( + "MD5", "MD5")); + addAlgorithm(new org.xtreemfs.common.checksums.algorithms.JavaMessageDigestAlgorithm( + "SHA1", "SHA-1")); + } catch (NoSuchAlgorithmException e) { + Logging.logMessage(Logging.LEVEL_WARN, this, e.getMessage() + + " in your java-installation"); + } + addAlgorithm(new org.xtreemfs.common.checksums.algorithms.JavaHash()); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.common.checksums.ChecksumProvider#getName() + */ + @Override + public String getName() { + return NAME; + } +} diff --git a/servers/src/org/xtreemfs/common/clients/HttpErrorException.java b/servers/src/org/xtreemfs/common/clients/HttpErrorException.java new file mode 100644 index 0000000000000000000000000000000000000000..350b44d8f0f068ea28140cddaf2f77e270058771 --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/HttpErrorException.java @@ -0,0 +1,112 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.common.clients; + +import java.io.IOException; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.HTTPUtils; + +/** + * Exception thrown by RPCClient for invalid + * server responses (i.e. other than status code 200). + * @author bjko + */ +public class HttpErrorException extends IOException { + + /** + * The status code returned by the server. + */ + protected final int statusCode; + + /** + * The response body sent by the server or null if none was sent. 
+ */ + protected final byte[] responseBody; + + /** + * Creates a new instance of HttpErrorException + * @param statusCode the status code sent by the server + * @param responseBody the body sent by the server + */ + public HttpErrorException(int statusCode, byte[] responseBody) { + super("status code is "+statusCode + ", error=" + new String(responseBody)); + this.statusCode = statusCode; + this.responseBody = responseBody; + } + + /** + * Creates a new instance of HttpErrorException + * @param statusCode the status code sent by the server + */ + public HttpErrorException(int statusCode) { + super("status code is "+statusCode); + this.statusCode = statusCode; + this.responseBody = null; + } + + /** + * Returns the status code sent by the server. + * @return the status code sent by the server + */ + public int getStatusCode() { + return this.statusCode; + } + + /** + * Returns the response body sent by the server. + * @return the response body sent by the server + */ + public byte[] getResponseBody() { + return responseBody; + } + + /** + * Returns the response body's content parsed by the JSON parser. + * @throws org.xtreemfs.foundation.json.JSONException if the body does not contain valid JSON + * @return the object read from the body + */ + public Object parseJSONResponse() throws JSONException { + String body = new String(responseBody, HTTPUtils.ENC_UTF8); + return JSONParser.parseJSON(new JSONString(body)); + } + + /** + * A string representation of the exception. + * @return a string representation of the exception. 
+ */ + public String toString() { + if (responseBody != null) + return this.getClass().getSimpleName()+": status code is "+statusCode+", error=" + new String(responseBody); + else + return this.getClass().getSimpleName()+": status code is "+statusCode; + } + + public boolean authenticationRequest() { + return this.statusCode == HTTPUtils.SC_UNAUTHORIZED; + } + +} diff --git a/servers/src/org/xtreemfs/common/clients/RPCClient.java b/servers/src/org/xtreemfs/common/clients/RPCClient.java new file mode 100644 index 0000000000000000000000000000000000000000..2ca451c06e1620a9cf10bc9ef1d78a528f82f561 --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/RPCClient.java @@ -0,0 +1,306 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.common.clients; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.security.MessageDigest; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.OutputUtils; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.SSLOptions; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.foundation.speedy.SpeedyRequest; + +/** + * Generic RPC over HTTP client. Can send JSON and binary requests via HTTP. + * Can also be used as a very simple HTTP client. + * @author bjko + */ +public class RPCClient { + + /** + * The speedy used by the client. + */ + private final MultiSpeedy speedy; + + /** + * timeout to use for RPCs + */ + private int timeout; + + /** + * default timeout used + */ + public static final int DEFAULT_TIMEOUT = 10000; + + /** + * Creates a new client with a private speedy instance. + * @throws java.io.IOException if speedy cannot be started. + */ + public RPCClient() throws IOException { + this(null); + } + + /** + * Creates a new instance of the RPCClient + * @param sharedSpeedy a speedy shared among several clients. If null, a new speedy instance is created. 
+ * @throws java.io.IOException if speedy cannot be started + */ + public RPCClient(final MultiSpeedy sharedSpeedy) + throws IOException { + + this.timeout = DEFAULT_TIMEOUT; + + if (sharedSpeedy != null) { + speedy = sharedSpeedy; + } else { + speedy = new MultiSpeedy(); + speedy.start(); + } + + Thread.yield(); + } + + /** + * Creates a new instance of the RPCClient + * @param sharedSpeedy a speedy shared among several clients. If null, a new speedy instance is created. + * @throws java.io.IOException if speedy cannot be started + */ + public RPCClient(MultiSpeedy sharedSpeedy, int timeout) + throws IOException { + this(sharedSpeedy); + this.timeout = timeout; + } + + /** + * Creates a new instance of the RPCClient + * A new speedy instance with SSL support will be created. + * @param sslOptions options for ssl connection, null for no SSL + * @throws java.io.IOException if speedy cannot be started + */ + public RPCClient(int timeout, SSLOptions sslOptions) + throws IOException { + speedy = new MultiSpeedy(sslOptions); + speedy.start(); + + this.timeout = timeout; + Thread.yield(); + } + + /** + * Shuts down the speedy used by this client. + * @attention Shuts down the speedy also if it is shared! + */ + public void shutdown() { + speedy.shutdown(); + } + + public void waitForShutdown() { + try { + speedy.waitForShutdown(); + } catch (Exception e) { + Logging.logMessage(Logging.LEVEL_ERROR, this, e); + } + } + + /** + * Sends an xtreemfs JSON RPC request. + * @param authString authentication string to send to the remote server + * @param addHdrs additional headers to include in the request. + * Cannot override headers set by the HTTP client automatically. + * @param server the server to send the request to + * @param method the RPC method to call (which is the URI sent as part of the HTTP request). + * @param data The request's parameters. If null, an empty body is sent. If data is a ReusableBuffer + * the data is sent as a binary body. 
Everything else is sent as a JSON encoded + * object. + * @return a RPCResponse for asynchrous requests + * @throws org.xtreemfs.foundation.json.JSONException if data cannot be translated into a JSON object. + * @throws java.io.IOException if the request cannot be sent. + */ + public RPCResponse sendRPC(InetSocketAddress server, + String method, Object data, String authString, + HTTPHeaders addHdrs) throws IOException,JSONException { + + if (data == null) { + return send(server, method, null, addHdrs, authString, HTTPUtils.DATA_TYPE.JSON,HTTPUtils.POST_TOKEN); + } else { + ReusableBuffer body = null; + HTTPUtils.DATA_TYPE type = HTTPUtils.DATA_TYPE.JSON; + if (data instanceof ReusableBuffer) { + Logging.logMessage(Logging.LEVEL_DEBUG,this,"request body contains binary data"); + body = (ReusableBuffer)data; + type = HTTPUtils.DATA_TYPE.BINARY; + } else { + Logging.logMessage(Logging.LEVEL_DEBUG,this,"request body contains JSON data"); + String json = JSONParser.writeJSON(data); + body = ReusableBuffer.wrap(json.getBytes(HTTPUtils.ENC_UTF8)); + } + return send(server, method, body, addHdrs,authString, type,HTTPUtils.POST_TOKEN); + } + } + + /** + * Uses the underlying speedy to check if the server is blocked because it is not + * responding. + * @param server the server to check + * @return true, if server is not blocked, false otherwise + * @see MultiSpeedy + */ + public boolean serverIsAvailable(InetSocketAddress server) { + return speedy.serverIsAvailable(server); + } + + + /** + * internal method for sending requests. 
+ */ + protected RPCResponse send(InetSocketAddress server, String uri, + ReusableBuffer body, HTTPHeaders headers, String authString, + HTTPUtils.DATA_TYPE type, String httpMethod) + throws IOException { + + assert(uri != null); + //FIXME: should be activated + //assert(authString != null); + + SpeedyRequest sr = null; + + if (body != null) { + if (headers != null) { + sr = new SpeedyRequest(httpMethod, uri, null, authString, body, + type, headers); + } else { + sr = new SpeedyRequest(httpMethod, uri, null, authString, body, + type); + } + } else { + if (headers != null) { + sr = new SpeedyRequest(httpMethod, uri, null, authString, null, + type, headers); + } else { + sr = new SpeedyRequest(httpMethod, uri, null, authString ); + } + } + sr.setTimeout(timeout); + RPCResponse resp = new RPCResponse(sr,server); + sr.listener = resp; + synchronized (speedy) { + speedy.sendRequest(sr, server); + } + return resp; + } + + public MultiSpeedy getSpeedy() { + return speedy; + } + + /**Generates a HashMap from the arguments passed. + * e.g. generateMap("key1",value1,"key2",value2) + */ + public static Map generateMap(Object ...args) { + if (args.length % 2 != 0) { + throw new IllegalArgumentException("require even number of arguments (key1,value1,key2,value2...)"); + } + Map m = new HashMap(args.length/2); + for (int i = 0; i < args.length; i = i+2) { + m.put((String)args[i],args[i+1]); + } + return m; + } + + /** Generates a list from the arguments passed. + */ + public static List generateList(Object ...args) { + List l = new ArrayList(args.length); + for (int i = 0; i < args.length; i++) { + l.add(args[i]); + } + return l; + } + + /** Generates a list from the string arguments passed. 
+ */ + public static List generateStringList(String ...args) { + List l = new ArrayList(args.length); + for (int i = 0; i < args.length; i++) { + l.add(args[i]); + } + return l; + } + + public void setTimeout(int timeout) { + this.timeout = timeout; + } + + public int getTimeout() { + return this.timeout; + } + + public static String createAuthResponseHeader(SpeedyRequest response, + String username, String password) { + //check header... + + final String authRequestHeader = response.responseHeaders.getHeader(HTTPHeaders.HDR_WWWAUTH); + if ((authRequestHeader == null) || (authRequestHeader.length() == 0)) + return null; + + try { + System.out.println("header: "+authRequestHeader); + + + Pattern p = Pattern.compile("nonce=\\\"(\\S+)\\\""); + Matcher m = p.matcher(authRequestHeader); + m.find(); + final String cNonce = m.group(1); + + + MessageDigest md5 = MessageDigest.getInstance("MD5"); + md5.update((username+":xtreemfs:"+password).getBytes()); + byte[] digest = md5.digest(); + final String HA1 = OutputUtils.byteArrayToHexString(digest).toLowerCase(); + + md5.update((response.getMethod()+":"+response.getURI()).getBytes()); + digest = md5.digest(); + final String HA2 = OutputUtils.byteArrayToHexString(digest).toLowerCase(); + + md5.update((HA1+":"+cNonce+":"+HA2).getBytes()); + digest = md5.digest(); + return OutputUtils.byteArrayToHexString(digest).toLowerCase(); + } catch (Exception ex) { + return null; + } + } +} diff --git a/servers/src/org/xtreemfs/common/clients/RPCResponse.java b/servers/src/org/xtreemfs/common/clients/RPCResponse.java new file mode 100644 index 0000000000000000000000000000000000000000..6c9bb9d3ee79a2114f1ec030ca1a0f95c8e9862a --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/RPCResponse.java @@ -0,0 +1,301 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. 
package org.xtreemfs.common.clients;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.concurrent.atomic.AtomicBoolean;

import org.xtreemfs.common.buffer.ReusableBuffer;
import org.xtreemfs.common.logging.Logging;
import org.xtreemfs.foundation.json.JSONException;
import org.xtreemfs.foundation.json.JSONParser;
import org.xtreemfs.foundation.json.JSONString;
import org.xtreemfs.foundation.pinky.HTTPHeaders;
import org.xtreemfs.foundation.pinky.HTTPUtils;
import org.xtreemfs.foundation.speedy.MultiSpeedy;
import org.xtreemfs.foundation.speedy.SpeedyRequest;
import org.xtreemfs.foundation.speedy.SpeedyResponseListener;

/**
 * Response for an asynchronous RPC request.
 *
 * @param <V> type of the JSON-decoded response body returned by {@link #get()}
 * @author bjko
 */
public class RPCResponse<V> implements SpeedyResponseListener {

    /** Maximum number of resend attempts with digest authentication. */
    public static int MAX_AUTH_RETRY = 3;

    /** Server the request was sent to; used for error messages and resends. */
    protected InetSocketAddress targetServer;

    /** Listener notified when the response becomes available. */
    protected RPCResponseListener listener;

    /**
     * The httpRequest used for sending this RPC via Speedy.
     */
    protected SpeedyRequest httpRequest;

    /**
     * Arbitrary attachment for continuations.
     */
    protected Object attachment;

    /** Set once the server's response (or a failure) has arrived. */
    protected AtomicBoolean finished;

    /** Credentials for digest authentication; null if not used. */
    protected String username, password;

    /** Number of authenticated resend attempts performed so far. */
    protected int authRetryCount;

    /** Speedy instance used to resend the request after an auth challenge. */
    protected MultiSpeedy speedy;

    /**
     * Creates a new instance of RPCResponse.
     *
     * @param request the request sent via Speedy
     * @param server  the server the request was sent to
     */
    public RPCResponse(SpeedyRequest request, InetSocketAddress server) {
        this.httpRequest = request;
        this.targetServer = server;
        finished = new AtomicBoolean(false);
    }

    /**
     * Creates a new instance of RPCResponse with credentials for digest
     * authentication.
     *
     * @param request the request sent via Speedy
     * @param server  the server the request was sent to
     * @param speedy  speedy instance used to resend the request on a 401
     * @param username user name for digest authentication
     * @param password password for digest authentication
     */
    public RPCResponse(SpeedyRequest request, InetSocketAddress server, MultiSpeedy speedy,
        String username, String password) {
        this(request, server);
        this.username = username;
        this.password = password;
        this.authRetryCount = 0;
        this.speedy = speedy;
    }

    /**
     * Checks the status of the request.
     *
     * @return returns true, if the server response is available or the request
     *         has failed.
     */
    public boolean isDone() {
        return this.finished.get();
    }

    /**
     * Waits for the response if necessary and throws exceptions if the request
     * did not succeed.
     *
     * If the server sent a response and a status code 200 (OK) the method
     * returns. If another status code is returned, an HttpErrorException is
     * thrown. If the server is not available or some other communication error
     * occurs, an IO exception is thrown.
     *
     * @throws java.lang.InterruptedException
     *             if it is interrupted while waiting for the server's response.
     * @throws org.xtreemfs.common.clients.HttpErrorException
     *             if the server returns a status code other than 200 (OK)
     * @throws java.io.IOException
     *             if the server is not available or a communication error
     *             occurs
     */
    public void waitForResponse() throws InterruptedException,
        HttpErrorException, IOException {
        waitForResponse(0);
    }

    /**
     * Waits up to {@code timeout} ms for the response, then evaluates the
     * request status. See {@link #waitForResponse()} for the error contract.
     *
     * @param timeout milliseconds to wait (0 = wait forever)
     */
    public void waitForResponse(long timeout) throws InterruptedException, HttpErrorException,
        IOException {
        synchronized (this) {
            if (!isDone()) {
                this.wait(timeout);
            }
        }
        assert (httpRequest != null);
        if (httpRequest.status == SpeedyRequest.RequestStatus.FINISHED) {
            if (httpRequest.statusCode == HTTPUtils.SC_OKAY) {
                return;
            } else {
                // BUGFIX: the retry guard originally used
                // 'authRetryCount > MAX_AUTH_RETRY', which can never be true
                // (the counter starts at 0 and is only incremented inside this
                // branch), so the request was never re-sent with digest
                // credentials. The request must be retried while the counter
                // is still below MAX_AUTH_RETRY.
                if ((httpRequest.statusCode == HTTPUtils.SC_UNAUTHORIZED)
                    && (username != null) && (this.authRetryCount < MAX_AUTH_RETRY)) {
                    // resend with authentication!
                    httpRequest.addDigestAuthentication(username, password);
                    this.authRetryCount++;
                    assert (httpRequest.listener == this);
                    Logging.logMessage(Logging.LEVEL_DEBUG, this,
                        "resending request with digest authentication");
                    speedy.sendRequest(httpRequest, targetServer);
                    // NOTE(review): as in the original code, control falls
                    // through and the 401 is still reported to the caller via
                    // the exception below — confirm whether callers are
                    // expected to wait again for the resent request.
                }
                if (httpRequest.responseBody != null)
                    throw new HttpErrorException(httpRequest.statusCode, httpRequest.responseBody
                            .array());
                else
                    throw new HttpErrorException(httpRequest.statusCode);
            }
        } else if (httpRequest.status == SpeedyRequest.RequestStatus.PENDING) {
            throw new IOException("server " + targetServer + " is not available");
        } else if (httpRequest.status == SpeedyRequest.RequestStatus.SENDING) {
            throw new IOException("cannot establish connection to server " + targetServer);
        } else if (httpRequest.status == SpeedyRequest.RequestStatus.WAITING) {
            throw new IOException("server " + targetServer + " did not send a response");
        } else {
            throw new IOException("server " + targetServer + " is not available");
        }
    }

    /**
     * Retrieves the response body sent by the server. Waits for the server if
     * necessary.
     *
     * If the server sent a response and a status code 200 (OK) the method
     * returns. If another status code is returned, an HttpErrorException is
     * thrown. If the server is not available or some other communication error
     * occurs, an IO exception is thrown.
     *
     * @throws java.lang.InterruptedException
     *             if it is interrupted while waiting for the server's response.
     * @throws org.xtreemfs.common.clients.HttpErrorException
     *             if the server returns a status code other than 200 (OK)
     * @throws java.io.IOException
     *             if the server is not available or a communication error
     *             occurs
     * @return the response body
     */
    public ReusableBuffer getBody() throws InterruptedException,
        HttpErrorException, IOException {
        waitForResponse();
        return httpRequest.responseBody;
    }

    /**
     * Waits for the response and decodes the JSON body.
     *
     * @return the parsed JSON value, or null if the server sent no body
     * @throws org.xtreemfs.foundation.json.JSONException if the body is not valid JSON
     */
    @SuppressWarnings("unchecked")
    public V get() throws InterruptedException, HttpErrorException,
        IOException, JSONException {

        waitForResponse();
        if (httpRequest.responseBody == null)
            return null;

        String body = new String(httpRequest.responseBody.array(),
            HTTPUtils.ENC_UTF8);
        Object o = JSONParser.parseJSON(new JSONString(body));
        return (V) o;
    }

    /**
     * Retrieves the response status code. Waits for the server's response if
     * necessary.
     *
     * @return the status code
     * @throws java.lang.InterruptedException
     *             if it is interrupted while waiting for the server's response.
     * @throws org.xtreemfs.common.clients.HttpErrorException
     *             if the server returns a status code other than 200 (OK)
     * @throws java.io.IOException
     *             if the server is not available or a communication error
     *             occurs
     */
    public int getStatusCode() throws InterruptedException, HttpErrorException,
        IOException {
        waitForResponse();
        return httpRequest.statusCode;
    }

    /**
     * Retrieves the response headers. Waits for the server's response if
     * necessary.
     *
     * @throws java.lang.InterruptedException
     *             if it is interrupted while waiting for the server's response.
     * @throws org.xtreemfs.common.clients.HttpErrorException
     *             if the server returns a status code other than 200 (OK)
     * @throws java.io.IOException
     *             if the server is not available or a communication error
     *             occurs
     * @return the response headers sent by the server
     */
    public HTTPHeaders getHeaders() throws InterruptedException,
        HttpErrorException, IOException {
        waitForResponse();
        return httpRequest.responseHeaders;
    }

    /**
     * Speedy callback: marks this response as finished, notifies the
     * registered listener (if any) and wakes up threads blocked in
     * {@link #waitForResponse()}.
     *
     * @throws RuntimeException if invoked twice for the same response
     */
    public void receiveRequest(SpeedyRequest theRequest) {
        if (this.finished.getAndSet(true)) {
            Logging.logMessage(Logging.LEVEL_ERROR, this,
                "RESPONSE ALREADY SET!");
            throw new RuntimeException("response already sent!");
        }

        if (listener != null) {
            listener.responseAvailable(this);
        }

        synchronized (this) {
            this.notifyAll();
        }
    }

    /**
     * Registers a listener; if the response is already available, the
     * listener is invoked immediately.
     */
    public void setResponseListener(RPCResponseListener listener) {
        synchronized (this) {
            this.listener = listener;
            if (this.isDone())
                listener.responseAvailable(this);
        }
    }

    /** Stores an arbitrary continuation attachment. */
    public void setAttachment(Object attachment) {
        this.attachment = attachment;
    }

    /** @return the attachment set via {@link #setAttachment(Object)} */
    public Object getAttachment() {
        return this.attachment;
    }

    /** @return the underlying Speedy request */
    public SpeedyRequest getSpeedyRequest() {
        return httpRequest;
    }

    /**
     * Returns the request's buffers to the pool. Must be called exactly once
     * when the response is no longer needed; afterwards the request is gone.
     */
    public void freeBuffers() {
        this.httpRequest.freeBuffer();
        this.httpRequest = null;
    }

    /**
     * Safety net: frees buffers if the caller forgot to. Kept for
     * compatibility although finalization is deprecated in modern Java.
     */
    protected void finalize() {
        if (this.httpRequest != null) {
            Logging.logMessage(Logging.LEVEL_DEBUG, this,
                "auto free for: " + this.httpRequest.responseHeaders);
            freeBuffers();
        }
    }

}
0000000000000000000000000000000000000000..69def1c4717d129641c7b03a90141d45f12b834b --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/RPCResponseListener.java @@ -0,0 +1,35 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.common.clients; + +/** + * + * @author bjko + */ +public interface RPCResponseListener { + + void responseAvailable(RPCResponse response); + +} diff --git a/servers/src/org/xtreemfs/common/clients/dir/DIRClient.java b/servers/src/org/xtreemfs/common/clients/dir/DIRClient.java new file mode 100644 index 0000000000000000000000000000000000000000..0fbd79aa25be06a52c814db8b0d756d592115c6c --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/dir/DIRClient.java @@ -0,0 +1,244 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. 
package org.xtreemfs.common.clients.dir;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.xtreemfs.common.clients.HttpErrorException;
import org.xtreemfs.common.clients.RPCClient;
import org.xtreemfs.common.clients.RPCResponse;
import org.xtreemfs.common.uuids.UnknownUUIDException;
import org.xtreemfs.foundation.json.JSONException;
import org.xtreemfs.foundation.pinky.SSLOptions;
import org.xtreemfs.foundation.speedy.MultiSpeedy;

/**
 * RPC client for the XtreemFS Directory Service (entity registration,
 * address mappings and global time).
 *
 * Each operation exists in two variants: one taking an explicit target
 * server and a convenience overload that uses the default server passed to
 * the constructor.
 *
 * @author bjko
 */
public class DIRClient extends RPCClient {

    /** Default operation timeout in milliseconds. */
    public static final long TIMEOUT = 15000;

    /** Server used by the convenience overloads without a server argument. */
    private final InetSocketAddress defaultServer;

    /** Volume-name prefix under which user home volumes are registered. */
    public static final String HOMEDIR_PREFIX = "user-";

    /** Creates a new instance of DIRClient */
    public DIRClient(MultiSpeedy sharedSpeedy, InetSocketAddress defaultServer) throws IOException {
        super(sharedSpeedy);
        this.defaultServer = defaultServer;
    }

    /** Creates a new instance of DIRClient */
    public DIRClient(MultiSpeedy sharedSpeedy, InetSocketAddress defaultServer, int timeout)
        throws IOException {
        super(sharedSpeedy, timeout);
        this.defaultServer = defaultServer;
    }

    /** Creates a new instance of DIRClient with its own speedy and SSL options. */
    public DIRClient(InetSocketAddress defaultServer, SSLOptions sslOptions, int timeout)
        throws IOException {
        super(timeout, sslOptions);
        this.defaultServer = defaultServer;
    }

    /**
     * Registers an entity (service or volume) at the directory service.
     *
     * @param server  directory server to contact
     * @param uuid    UUID of the entity to register
     * @param data    entity attributes
     * @param version version number for optimistic concurrency
     * @param authStr authentication string
     */
    public RPCResponse registerEntity(InetSocketAddress server, String uuid,
        Map<String, Object> data, long version, String authStr) throws IOException,
        HttpErrorException, JSONException, InterruptedException {

        // three arguments are sent: uuid, data and version (the original
        // presized the list to 2)
        List<Object> args = new ArrayList<Object>(3);
        args.add(uuid);
        args.add(data);
        args.add(version);

        RPCResponse r = sendRPC(server, "registerEntity", args, authStr, null);
        return r;
    }

    /** Registers an entity at the default directory server. */
    public RPCResponse registerEntity(String uuid, Map<String, Object> data, long version,
        String authStr) throws IOException, HttpErrorException, JSONException, InterruptedException {
        return registerEntity(defaultServer, uuid, data, version, authStr);
    }

    /**
     * Queries the directory service for entities matching the given query.
     *
     * @param server directory server to contact
     * @param query  attribute name/value pairs an entity must match
     * @param attrs  attribute names to return (null = all)
     * @return a response whose body maps entity UUIDs to their attributes
     */
    @SuppressWarnings("unchecked")
    public RPCResponse<Map<String, Map<String, Object>>> getEntities(InetSocketAddress server,
        Map<String, Object> query, List<String> attrs, String authStr) throws IOException,
        HttpErrorException, JSONException, InterruptedException {

        List<Object> args = new ArrayList<Object>(2);
        args.add(query);
        args.add(attrs);

        RPCResponse<Map<String, Map<String, Object>>> r = sendRPC(server, "getEntities", args,
            authStr, null);

        return r;
    }

    /** Queries the default directory server for entities. */
    public RPCResponse<Map<String, Map<String, Object>>> getEntities(Map<String, Object> query,
        List<String> attrs, String authStr) throws IOException, HttpErrorException, JSONException,
        InterruptedException {
        return getEntities(defaultServer, query, attrs, authStr);
    }

    /**
     * Removes an entity registration.
     *
     * @param server directory server to contact
     * @param uuid   UUID of the entity to deregister
     */
    public RPCResponse deregisterEntity(InetSocketAddress server, String uuid, String authStr)
        throws IOException, HttpErrorException, JSONException, InterruptedException {

        List<Object> args = new ArrayList<Object>(1);
        args.add(uuid);

        // BUGFIX: the original sent this request to 'defaultServer',
        // silently ignoring the explicit 'server' parameter.
        RPCResponse r = sendRPC(server, "deregisterEntity", args, authStr, null);

        return r;
    }

    /** Removes an entity registration at the default directory server. */
    public RPCResponse deregisterEntity(String uuid, String authStr) throws IOException,
        HttpErrorException, JSONException, InterruptedException {
        return deregisterEntity(defaultServer, uuid, authStr);
    }

    /**
     * Registers the address mapping list for a UUID.
     *
     * @param mapping list of mapping records (protocol, address, port, ...)
     * @param version version number for optimistic concurrency
     */
    public RPCResponse registerAddressMapping(InetSocketAddress server, String uuid,
        List<Map<String, Object>> mapping, long version, String authStr) throws IOException,
        HttpErrorException, JSONException, InterruptedException {

        List<Object> args = new ArrayList<Object>(3);
        args.add(uuid);
        args.add(mapping);
        args.add(version);

        RPCResponse r = sendRPC(server, "registerAddressMapping", args, authStr, null);
        return r;
    }

    /** Registers an address mapping at the default directory server. */
    public RPCResponse registerAddressMapping(String uuid, List<Map<String, Object>> mapping,
        long version, String authStr) throws IOException, HttpErrorException, JSONException,
        InterruptedException {
        return registerAddressMapping(defaultServer, uuid, mapping, version, authStr);
    }

    /**
     * Retrieves the address mappings for a UUID.
     *
     * @return a response whose body maps the UUID to [version, mappingList];
     *         the element type was reconstructed from usage in
     *         {@link #locateUserHome} — TODO confirm against the wire format
     */
    @SuppressWarnings("unchecked")
    public RPCResponse<Map<String, List<Object>>> getAddressMapping(
        InetSocketAddress server, String uuid, String authStr) throws IOException,
        HttpErrorException, JSONException, InterruptedException {

        List<Object> args = new ArrayList<Object>(1);
        args.add(uuid);

        RPCResponse<Map<String, List<Object>>> r = sendRPC(server,
            "getAddressMapping", args, authStr, null);
        return r;
    }

    /** Retrieves the address mappings from the default directory server. */
    public RPCResponse<Map<String, List<Object>>> getAddressMapping(String uuid,
        String authStr) throws IOException, HttpErrorException, JSONException, InterruptedException {
        return getAddressMapping(defaultServer, uuid, authStr);
    }

    /**
     * Removes the address mappings of a UUID.
     *
     * @param server directory server to contact
     */
    public RPCResponse deregisterAddressMapping(InetSocketAddress server, String uuid,
        String authStr) throws IOException, HttpErrorException, JSONException, InterruptedException {

        List<Object> args = new ArrayList<Object>(1);
        args.add(uuid);

        // BUGFIX: the original sent this request to 'defaultServer',
        // silently ignoring the explicit 'server' parameter.
        RPCResponse r = sendRPC(server, "deregisterAddressMapping", args, authStr, null);

        return r;
    }

    /** Removes the address mappings at the default directory server. */
    public RPCResponse deregisterAddressMapping(String uuid, String authStr) throws IOException,
        HttpErrorException, JSONException, InterruptedException {
        return deregisterAddressMapping(defaultServer, uuid, authStr);
    }

    /** Requests the directory service's global time. */
    public RPCResponse getGlobalTime(InetSocketAddress server, String authStr)
        throws IOException, HttpErrorException, JSONException, InterruptedException {

        RPCResponse r = sendRPC(server, "getGlobalTime", new ArrayList<Object>(0), authStr, null);
        return r;
    }

    /** Requests the global time from the default directory server. */
    public RPCResponse getGlobalTime(String authStr) throws IOException, HttpErrorException,
        JSONException, InterruptedException {

        return getGlobalTime(defaultServer, authStr);
    }

    /**
     * Retrieves the volume URL for a user's home volume.
     * @param globalUserId the user's global ID (GUID)
     * @param authStr authentication string for the directory service
     * @return the URL or null if the volume cannot be found
     * @throws java.io.IOException
     * @throws org.xtreemfs.common.clients.HttpErrorException
     * @throws org.xtreemfs.foundation.json.JSONException
     * @throws java.lang.InterruptedException
     */
    @SuppressWarnings("unchecked")
    public String locateUserHome(String globalUserId, String authStr) throws IOException,
        HttpErrorException, JSONException, InterruptedException {

        Map<String, Object> qry = new HashMap<String, Object>();
        qry.put("type", "volume");
        qry.put("name", HOMEDIR_PREFIX + globalUserId);
        RPCResponse<Map<String, Map<String, Object>>> r = this.getEntities(qry, null, authStr);
        Map<String, Map<String, Object>> map = r.get();

        if (map.size() == 0)
            return null;

        // take the first matching volume entry
        String volname = null;
        String mrcuuid = null;
        for (String uuid : map.keySet()) {
            Map<String, Object> data = map.get(uuid);
            mrcuuid = (String) data.get("mrc");
            volname = (String) data.get("name");
            break;
        }
        if (mrcuuid == null)
            return null;

        RPCResponse<Map<String, List<Object>>> r2 = this.getAddressMapping(mrcuuid, authStr);
        r2.waitForResponse(2000);
        List<Object> l = r2.get().get(mrcuuid);
        // BUGFIX: the mapping list must contain at least two elements
        // (version, mappings) because element 1 is accessed below; the
        // original only rejected size() == 1, letting an empty list fall
        // through to an IndexOutOfBoundsException.
        if ((l == null) || (l.size() < 2)) {
            throw new UnknownUUIDException("MRC's uuid " + mrcuuid
                + " is not registered at directory server");
        }
        List<Map<String, Object>> mappings = (List<Map<String, Object>>) l.get(1);
        for (int i = 0; i < mappings.size(); i++) {
            Map<String, Object> addrMapping = mappings.get(i);
            final String network = (String) addrMapping.get("match_network");
            // "*" matches any client network
            if (network.equals("*")) {
                final String address = (String) addrMapping.get("address");
                final String protocol = (String) addrMapping.get("protocol");
                final int port = ((Long) addrMapping.get("port")).intValue();
                return protocol + "://" + address + ":" + port + "/" + volname;
            }
        }
        return null;
    }
}
+ * @param data the data to write (buffer must be length+offset bytes long). + * @param offset the position within the buffer to start at. + * @param length number of bytes to write + * @param filePosition the offset within the file + * @return the number of bytes written + * @throws java.lang.Exception + */ + public int write(byte[] data, int offset, int length, long filePosition) throws Exception; +} diff --git a/servers/src/org/xtreemfs/common/clients/io/ByteMapperFactory.java b/servers/src/org/xtreemfs/common/clients/io/ByteMapperFactory.java new file mode 100644 index 0000000000000000000000000000000000000000..6da1ed42fbeacb0ad66f7874837646f960dbc705 --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/io/ByteMapperFactory.java @@ -0,0 +1,34 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Nele Andersen (ZIB) + */ + + +package org.xtreemfs.common.clients.io; + +public class ByteMapperFactory { + + public static ByteMapper createByteMapper(String policy, int stripeSize, ObjectStore store) { + //if( policy == "RAID0" ) + return new ByteMapperRAID0(stripeSize, store); + } +} diff --git a/servers/src/org/xtreemfs/common/clients/io/ByteMapperRAID0.java b/servers/src/org/xtreemfs/common/clients/io/ByteMapperRAID0.java new file mode 100644 index 0000000000000000000000000000000000000000..d31189d42ccebf4c127ec5f9a9d9733e4e9097d4 --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/io/ByteMapperRAID0.java @@ -0,0 +1,149 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Nele Andersen (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.common.clients.io; + +import java.io.IOException; + +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; + +class ByteMapperRAID0 implements ByteMapper{ + + final int stripeSize; + + ObjectStore objectStore; + + public ByteMapperRAID0(int stripeSize, ObjectStore objectStore){ + this.stripeSize = stripeSize; + this.objectStore = objectStore; + } + + /** + * + * @param resultBuffer - the buffer into which the data is read. + * @param offset - the start offset of the data. + * @param bytesToRead - the maximum number of bytes read. + * @return the total number of bytes read into the buffer, or -1 if + * there is no more data because the end of the file has been reached. + * @throws Exception + * @throws IOException + */ + public int read(byte[] data, int offset, int length, long filePosition) throws Exception{ + + if (data.length < offset+length) + throw new RuntimeException("buffer is too small!"); + + final int firstObject = (int) (filePosition / this.stripeSize); + assert(firstObject >= 0); + + int lastObject = (int) ( (filePosition + ((long)length)) / this.stripeSize); + if (( (filePosition + ((long)length)) % this.stripeSize) == 0) + lastObject--; + assert(lastObject >= firstObject); + + final int offsetInFirstObject = (int) (filePosition % this.stripeSize); + assert(offsetInFirstObject < stripeSize); + final int bytesInLastObject = (int) (((filePosition + length) % this.stripeSize) == 0 ? 
this.stripeSize : + ((filePosition + length) % this.stripeSize)); + assert(bytesInLastObject > 0); + assert(bytesInLastObject <= stripeSize); + + int bytesRead = 0; + for (int obj = firstObject; obj <= lastObject; obj++) { + + int bytesToRead = this.stripeSize; + int objOffset = 0; + + if (obj == firstObject) + objOffset = offsetInFirstObject; + if (obj == lastObject) + bytesToRead = bytesInLastObject; + + assert(bytesToRead > 0); + assert(objOffset >= 0); + + ReusableBuffer rb = objectStore.readObject(obj, objOffset, bytesToRead); + assert(offset+bytesRead <= data.length); + if (rb == null) { + //EOF! + break; + } + if (rb.capacity() < bytesToRead) { + //EOF! + rb.get(data, offset+bytesRead,rb.capacity()); + bytesRead += rb.capacity(); + BufferPool.free(rb); + break; + } + rb.get(data, offset+bytesRead, bytesToRead); + bytesRead += rb.capacity(); + BufferPool.free(rb); + } + return bytesRead; + + } + + public int write(byte[] data, int offset, int length, long filePosition) throws Exception{ + + final int firstObject = (int) (filePosition / this.stripeSize); + int lastObject = (int) ( (filePosition + ((long)length)) / this.stripeSize); + if (( (filePosition + ((long)length)) % this.stripeSize) == 0) + lastObject--; + + final int offsetInFirstObject = (int) (filePosition % this.stripeSize); + + + int bytesInLastObject = -1; + if (firstObject == lastObject) { + bytesInLastObject = length; + } else { + if (((filePosition + length) % this.stripeSize) == 0) { + bytesInLastObject = this.stripeSize; + } else { + bytesInLastObject = (int)((filePosition + length) % this.stripeSize); + } + } + + + int bytesWritten = 0; + for (int obj = firstObject; obj <= lastObject; obj++) { + + int bytesToWrite = this.stripeSize; + int objOffset = 0; + + if (obj == firstObject) + objOffset = offsetInFirstObject; + if (obj == lastObject) + bytesToWrite = bytesInLastObject; + + ReusableBuffer view = ReusableBuffer.wrap(data, offset+bytesWritten, bytesToWrite); + 
objectStore.writeObject(objOffset, obj, view); + bytesWritten += bytesToWrite; + } + return bytesWritten; + + } + +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/common/clients/io/ObjectStore.java b/servers/src/org/xtreemfs/common/clients/io/ObjectStore.java new file mode 100644 index 0000000000000000000000000000000000000000..9c5d21f7de37b90512cc358742f1b7a335edbf91 --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/io/ObjectStore.java @@ -0,0 +1,51 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Nele Andersen (ZIB) + */ + +package org.xtreemfs.common.clients.io; + +import java.io.IOException; + +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.HttpErrorException; +import org.xtreemfs.foundation.json.JSONException; + +public interface ObjectStore { + + /** + * read an object from an OSD. + * @param offset offset within the object + * @param objectNo object number (0 is the first object in a file) + * @param length number of bytes to read + * @return the data read. 
In case of an EOF the buffer's length will be smaller than requested! + * @throws java.io.IOException + * @throws org.xtreemfs.foundation.json.JSONException + * @throws java.lang.InterruptedException + * @throws org.xtreemfs.common.clients.HttpErrorException + */ + ReusableBuffer readObject(long offset, long objectNo, long length) throws IOException, + JSONException, InterruptedException, HttpErrorException; + + void writeObject(long offset, long objectNo, ReusableBuffer buffer) throws IOException, + JSONException, InterruptedException, HttpErrorException; +} diff --git a/servers/src/org/xtreemfs/common/clients/io/RandomAccessFile.java b/servers/src/org/xtreemfs/common/clients/io/RandomAccessFile.java new file mode 100644 index 0000000000000000000000000000000000000000..73e864b917555a05893029e39d09110c11a71f99 --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/io/RandomAccessFile.java @@ -0,0 +1,386 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Nele Andersen (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.common.clients.io; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URL; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.HttpErrorException; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.mrc.MRCClient; +import org.xtreemfs.common.clients.osd.OSDClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.utils.utils; + +public class RandomAccessFile implements ObjectStore { + + private MultiSpeedy speedy; + + private MRCClient mrcClient; + + private OSDClient osdClient; + + private Capability capability; + + private Locations locations; + + private Location selectedReplica; + + private StripingPolicy selectedReplicaStripingPolicy; + + private List selectedReplicaOSDs; + + private int selectedReplicaStripeSize; + + private String fileId; + + private String pathName; + + private InetSocketAddress mrcAddress; + + private String authString; + + private ByteMapper byteMapper; + + private String newFileSizeHdr; + + private long filePos; + + private Map capAndXLoc; + + private long capTime; + + public RandomAccessFile(String mode, InetSocketAddress mrcAddress, String pathName, + MultiSpeedy speedy, String authString, StripingPolicy spolicy) throws Exception { + + this.speedy = speedy; + this.pathName = pathName; + this.mrcAddress 
= mrcAddress; + this.authString = authString; + + if (speedy == null) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "speedy is null"); + + // use the shared speedy to create an MRC and OSD client + mrcClient = new MRCClient(speedy, 30000); + osdClient = new OSDClient(speedy, 30000); + + // create a new file if necessary + try { + if (mode.contains("c")) { + mode = "w"; + mrcClient.createFile(mrcAddress, pathName, authString); + } + } catch (HttpErrorException ex) { + // ignore them + } + + capAndXLoc = mrcClient.open(mrcAddress, pathName, mode, authString); + + // set and read striping policy + locations = new Locations(new JSONString(capAndXLoc.get(HTTPHeaders.HDR_XLOCATIONS))); + capability = new Capability(capAndXLoc.get(HTTPHeaders.HDR_XCAPABILITY)); + + capTime = System.currentTimeMillis(); + + setReplicaNo(0); + + fileId = capability.getFileId(); + newFileSizeHdr = null; + filePos = 0; + } + + public RandomAccessFile(String mode, InetSocketAddress mrcAddress, String pathName, + MultiSpeedy speedy, String authString) throws Exception { + this(mode, mrcAddress, pathName, speedy, authString, null); + } + + public RandomAccessFile(String mode, InetSocketAddress mrcAddress, String pathName, + MultiSpeedy speedy) throws Exception { + this(mode, mrcAddress, pathName, speedy, NullAuthProvider.createAuthString(System + .getProperty("user.name"), MRCClient.generateStringList(System + .getProperty("user.name")))); + } + + public RandomAccessFile(String mode, URL mrcURL, String pathName, MultiSpeedy speedy) + throws Exception { + this(mode, new InetSocketAddress(mrcURL.getHost(), mrcURL.getPort()), pathName, speedy); + } + + public RandomAccessFile(String mode, String pathName, MultiSpeedy speedy) throws Exception { + this(mode, new URL(utils.getxattr(pathName, "xtreemfs.url")), pathName, speedy); + } + + /** + * + * @param resultBuffer + * - the buffer into which the data is read. + * @param offset + * - the start offset of the data. 
+ * @param bytesToRead + * - the maximum number of bytes read. + * @return - the total number of bytes read into the buffer, or -1 if there + * is no more data because the end of the file has been reached. + * @throws Exception + * @throws IOException + */ + public int read(byte[] resultBuffer, int offset, int bytesToRead) throws Exception { + + int tmp = byteMapper.read(resultBuffer, offset, bytesToRead, filePos); + filePos += tmp; + return tmp; + } + + /** + * + * @param objectNo + * - relative object number. + * @return the number of bytes in the object + * @throws HttpErrorException + * @throws IOException + * @throws JSONException + * @throws InterruptedException + */ + public int readObject(int objectNo) throws HttpErrorException, IOException, JSONException, + InterruptedException { + + // check whether capability needs to be renewed + checkCap(); + + RPCResponse response = null; + + try { + int current_osd_index = selectedReplicaStripingPolicy.getOSDByObject(objectNo); + InetSocketAddress current_osd_address = selectedReplicaOSDs.get(current_osd_index) + .getAddress(); + + response = osdClient.get(current_osd_address, locations, capability, fileId, objectNo); + + String header = response.getHeaders().getHeader(HTTPHeaders.HDR_XINVALIDCHECKSUM); + if (header != null && header.equalsIgnoreCase("true")) + throw new IOException("object " + objectNo + " has an invalid checksum"); + + ReusableBuffer data = response.getBody(); + if (data == null) + return 0; + + data.flip(); + + return data.limit(); + } finally { + if (response != null) + response.freeBuffers(); + } + } + + /** + * + * @param objectNo + * - relative object number. + * @param firstByteInObject + * - the first byte to be read. + * @param bytesInObject + * - the maximal number of bytes to be read. + * @return a ReusableBuffer containing the data which was read. 
+ */ + public ReusableBuffer readObject(long objectNo, long firstByteInObject, long bytesInObject) + throws IOException, JSONException, InterruptedException, HttpErrorException { + + int current_osd_index = selectedReplicaStripingPolicy.getOSDByObject(objectNo); + InetSocketAddress current_osd_address = selectedReplicaOSDs.get(current_osd_index) + .getAddress(); + + RPCResponse response = osdClient.get(current_osd_address, locations, capability, fileId, + objectNo, firstByteInObject, bytesInObject - 1); + + ReusableBuffer data = response.getBody(); + if (data == null) { + return null; + } + data.flip(); + return data; + } + + /** + * Writes bytesToWrite bytes from the writeFromBuffer byte array starting at + * offset to this file. + * + * @param writeFromBuffer + * @param offset + * @param bytesToWrite + * @return the number of bytes written + * @throws Exception + */ + public int write(byte[] writeFromBuffer, int offset, int bytesToWrite) throws Exception { + + int tmp = byteMapper.write(writeFromBuffer, offset, bytesToWrite, filePos); + filePos += bytesToWrite; + return tmp; + } + + /** + * Writes... + * + * @param firstByteInObject + * - the start offset in the file + * @param objectNo + * - the relative object number + * @param data + * - the data to be written..... 
+ */ + public void writeObject(long firstByteInObject, long objectNo, ReusableBuffer data) + throws IOException, JSONException, InterruptedException, HttpErrorException { + + // check whether capability needs to be renewed + checkCap(); + + int current_osd_index = selectedReplicaStripingPolicy.getOSDByObject(objectNo); + InetSocketAddress current_osd_address = selectedReplicaOSDs.get(current_osd_index) + .getAddress(); + + RPCResponse response = osdClient.put(current_osd_address, locations, capability, fileId, + objectNo, firstByteInObject, data); + + response.waitForResponse(); + final String tmp = response.getHeaders().getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + if (tmp != null) + newFileSizeHdr = tmp; + } + + public String getStripingPolicy() { + return selectedReplica.getStripingPolicy().toString(); + } + + public long getStripeSize() { + // the stripe size of a file is constant. + return selectedReplicaStripingPolicy.getStripeSize(0); + } + + public long getStripeSize(long objectNo) { + return selectedReplicaStripingPolicy.getStripeSize(objectNo); + } + + public List getOSDs() { + return selectedReplicaOSDs; + } + + public long length() throws Exception { + return (Long) mrcClient.stat(mrcAddress, pathName, false, true, false, authString).get( + "size"); + } + + public long noOfObjects() throws Exception { + return (length() / selectedReplicaStripeSize) + 1; + } + + public ServiceUUID getOSDId(long objectNo) { + long osd = selectedReplicaStripingPolicy.getOSDByObject(objectNo); + return selectedReplicaOSDs.get((int) osd); + } + + public Locations getLocations() { + return locations; + } + + public Capability getCapability() { + return capability; + } + + public String getFileId() { + return fileId; + } + + public String getPath() { + return pathName; + } + + public void seek(long pos) { + filePos = pos; + } + + public long getFilePointer() { + return filePos; + } + + public void flush() throws Exception { + if (newFileSizeHdr != null) + 
this.mrcClient.updateFileSize(mrcAddress, capability.toString(), newFileSizeHdr, + authString); + } + + public void delete() throws Exception { + mrcClient.delete(mrcAddress, pathName, authString); + RPCResponse r = osdClient.delete(selectedReplicaOSDs.get(0).getAddress(), locations, + capability, fileId); + r.waitForResponse(); + } + + public void finalize() { + if (speedy == null) { + mrcClient.shutdown(); + osdClient.shutdown(); + } + } + + private void setReplicaNo(int no) { + + selectedReplica = locations.getLocation(no); + selectedReplicaStripingPolicy = selectedReplica.getStripingPolicy(); + selectedReplicaOSDs = selectedReplica.getOSDs(); + selectedReplicaStripeSize = (int) selectedReplicaStripingPolicy.getStripeSize(0); + + byteMapper = ByteMapperFactory.createByteMapper(selectedReplicaStripingPolicy + .getPolicyName(), selectedReplicaStripeSize, this); + } + + private void checkCap() throws IOException { + + long time = System.currentTimeMillis(); + + if (time - capTime > (Capability.DEFAULT_VALIDITY - 60) * 1000) { + try { + capAndXLoc = mrcClient.renew(mrcAddress, capAndXLoc, authString); + capTime = time; + } catch (Exception e) { + throw new IOException(e); + } + } + } + +} diff --git a/servers/src/org/xtreemfs/common/clients/mrc/MRCClient.java b/servers/src/org/xtreemfs/common/clients/mrc/MRCClient.java new file mode 100644 index 0000000000000000000000000000000000000000..d4d72ecf5178f418685af33885124e5de2bc7345 --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/mrc/MRCClient.java @@ -0,0 +1,622 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.common.clients.mrc; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.SSLOptions; +import org.xtreemfs.foundation.speedy.MultiSpeedy; + +/** + * A client for the MRC. Can be used as a generic client for all JSON-WP34-RPC + * calls. Supports sync and async RPCs. 
+ * + * @author bjko + */ +public class MRCClient extends RPCClient { + + /** + * Creates a new instance of MRCClient + * + * @param debug + * if true speedy will generate debug messages + * @throws java.io.IOException + */ + public MRCClient(MultiSpeedy sharedSpeedy) throws IOException { + super(sharedSpeedy); + } + + public MRCClient(MultiSpeedy sharedSpeedy, int timeout) throws IOException { + super(sharedSpeedy, timeout); + } + + public MRCClient() throws IOException { + this(null); + } + + public MRCClient(int timeout, SSLOptions sslOptions) throws IOException { + super(timeout, sslOptions); + } + + public void setACLEntries(InetSocketAddress server, String path, + Map entries, String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "setACLEntries", RPCClient.generateList(path, + entries), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void setXAttrs(InetSocketAddress server, String path, + Map attrs, String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "setXAttrs", RPCClient + .generateList(path, attrs), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void addReplica(InetSocketAddress server, String fileId, + Map stripingPolicy, List osdList, + String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "addReplica", RPCClient.generateList(fileId, + stripingPolicy, osdList), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void removeReplica(InetSocketAddress server, String fileId, + Map stripingPolicy, List osdList, + String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "removeReplica", RPCClient.generateList(fileId, + stripingPolicy, osdList), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void 
changeAccessMode(InetSocketAddress server, String path, + long mode, String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "changeAccessMode", RPCClient.generateList( + path, mode), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void changeOwner(InetSocketAddress server, String path, + String userId, String groupId, String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "changeOwner", RPCClient.generateList(path, + userId, groupId), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public boolean checkAccess(InetSocketAddress server, String path, + String mode, String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "checkAccess", RPCClient.generateList(path, + mode), authString, null); + return (Boolean) r.get(); + } finally { + r.freeBuffers(); + } + } + + /** + *

The MRC performs an existence check for the given list of file IDs and
 + * returns an answer string, e.g. {001001111...}:
 + * a '1' means the file exists, a '0' means it does not, and a single '2'
 + * is returned if the whole volume does not exist.

+ * + * @param server + * @param volumeID + * @param data + * + * @return MRC Response + * + * @throws JSONException + * @throws IOException + */ + public RPCResponse checkFileList(InetSocketAddress server, String volumeID, + List fileList, String authString) throws IOException, JSONException { + + RPCResponse r = sendRPC(server, "checkFileList", + RPCClient.generateList(volumeID, fileList), + authString, null); + return r; + } + + public void createDir(InetSocketAddress server, String dirPath, + String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "createDir", RPCClient.generateList(dirPath), + authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void createDir(InetSocketAddress server, String dirPath, + Map attrs, long accessMode, String authString) + throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "createDir", RPCClient.generateList(dirPath, + attrs, accessMode), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void createFile(InetSocketAddress server, String filePath, + String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "createFile", RPCClient.generateList(filePath), + authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void createFile(InetSocketAddress server, String filePath, + Map attrs, Map stripingPolicy, + long accessMode, String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "createFile", RPCClient.generateList(filePath, + attrs, stripingPolicy, accessMode), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public Map createFile(InetSocketAddress server, + String filePath, Map attrs, + Map stripingPolicy, long accessMode, boolean open, + String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "createFile", 
RPCClient.generateList(filePath, + attrs, stripingPolicy, accessMode, open), authString, null); + + return toXCapMap(r.getHeaders()); + } finally { + r.freeBuffers(); + } + } + + /** + * Restore the MetaData for the given fileID. + * + * @param server + * @param filePath + * @param fileID + * @param fileSize + * @param xAttrs + * @param authString + * @param osd + * @param objectSize + * @throws Exception + */ + public void restoreFile(InetSocketAddress server, String filePath, long fileID, long fileSize, Map xAttrs, + String authString,String osd, long objectSize, String volumeID) + throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "restoreFile", RPCClient.generateList(filePath, + fileID, fileSize, xAttrs, osd, objectSize, volumeID), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void createLink(InetSocketAddress server, String linkPath, + String targetPath, String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "createLink", RPCClient.generateList(linkPath, + targetPath), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void createSymbolicLink(InetSocketAddress server, String linkPath, + String targetPath, String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "createSymbolicLink", RPCClient.generateList( + linkPath, targetPath), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void createVolume(InetSocketAddress server, String volumeName, + String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "createVolume", RPCClient + .generateList(volumeName), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void createVolume(InetSocketAddress server, String volumeName, + long osdSelectionPolicyId, Map stripingPolicy, + long acPolicyId, long 
partitioningPolicyId, Map acl, + String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "createVolume", RPCClient.generateList( + volumeName, osdSelectionPolicyId, stripingPolicy, acPolicyId, + partitioningPolicyId, acl), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void delete(InetSocketAddress server, String path, String authString) + throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "delete", RPCClient.generateList(path), + authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void deleteVolume(InetSocketAddress server, String name, + String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "deleteVolume", RPCClient.generateList(name), + authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public Map getLocalVolumes(InetSocketAddress server, + String authString) throws Exception { + + RPCResponse> r = null; + try { + r = sendRPC(server, "getLocalVolumes", RPCClient.generateList(), + authString, null); + return r.get(); + } finally { + r.freeBuffers(); + } + } + + public Map getServerConfiguration(InetSocketAddress server, + String authString) throws Exception { + + RPCResponse> r = null; + try { + r = sendRPC(server, "getServerConfiguration", RPCClient.generateList(), + authString, null); + return r.get(); + } finally { + r.freeBuffers(); + } + } + + public void initFileSystem(InetSocketAddress server, String authString) + throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "initFileSystem", RPCClient.generateList(), + authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public Map move(InetSocketAddress server, + String sourcePath, String targetPath, String authString) + throws Exception { + + RPCResponse> r = null; + try { + r = sendRPC(server, "move", 
RPCClient.generateList(sourcePath, + targetPath), authString, null); + return toXCapMap(r.getHeaders()); + } finally { + r.freeBuffers(); + } + } + + public Map open(InetSocketAddress server, String path, + String accessMode, String authString) throws Exception { + + RPCResponse> r = null; + try { + r = sendRPC(server, "open", RPCClient + .generateList(path, accessMode), authString, null); + return toXCapMap(r.getHeaders()); + } finally { + r.freeBuffers(); + } + } + + public List query(InetSocketAddress server, String path, + String queryString, String authString) throws Exception { + + RPCResponse> r = null; + try { + r = sendRPC(server, "query", RPCClient.generateList(path, + queryString), authString, null); + return r.get(); + } finally { + r.freeBuffers(); + } + } + + public List readDir(InetSocketAddress server, String path, + String authString) throws Exception { + + RPCResponse> r = null; + try { + r = sendRPC(server, "readDir", RPCClient.generateList(path), + authString, null); + return r.get(); + } finally { + r.freeBuffers(); + } + } + + public Map> readDirAndStat( + InetSocketAddress server, String path, String authString) + throws Exception { + + RPCResponse>> r = null; + try { + r = sendRPC(server, "readDirAndStat", RPCClient.generateList(path), + authString, null); + return r.get(); + } finally { + r.freeBuffers(); + } + } + + public void removeACLEntries(InetSocketAddress server, String path, + List entities, String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "removeACLEntries", RPCClient.generateList( + path, entities), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public void removeXAttrs(InetSocketAddress server, String path, + List attrKeys, String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "removeXAttrs", RPCClient.generateList(path, + attrKeys), authString, null); + r.waitForResponse(); + } finally { + 
r.freeBuffers(); + } + } + + public Map renew(InetSocketAddress server, + Map capability, String authString) throws Exception { + + RPCResponse> r = null; + try { + r = sendRPC(server, "renew", RPCClient.generateList(), authString, + toHTTPHeaders(capability)); + return toXCapMap(r.getHeaders()); + } finally { + r.freeBuffers(); + } + } + + public void updateFileSize(InetSocketAddress server, String capability, + String newFileSizeHeader, String authString) throws Exception { + + Map headers = new HashMap(); + headers.put(HTTPHeaders.HDR_XCAPABILITY, capability); + headers.put(HTTPHeaders.HDR_XNEWFILESIZE, newFileSizeHeader); + + RPCResponse r = null; + try { + r = sendRPC(server, "updateFileSize", RPCClient.generateList(), + authString, toHTTPHeaders(headers)); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public Map stat(InetSocketAddress server, String path, + boolean inclReplicas, boolean inclXAttrs, boolean inclACLs, + String authString) throws Exception { + + RPCResponse> r = null; + try { + r = sendRPC(server, "stat", RPCClient.generateList(path, + inclReplicas, inclXAttrs, inclACLs), authString, null); + return r.get(); + } finally { + if (r != null) + r.freeBuffers(); + } + } + + public String getXAttr(InetSocketAddress server, String path, String key, + String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "getXAttr", RPCClient.generateList(path, key), + authString, null); + return r.get(); + } finally { + if (r != null) + r.freeBuffers(); + } + } + + public void setDefaultStripingPolicy(InetSocketAddress server, String path, + Map stripingPolicy, String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "setDefaultStripingPolicy", RPCClient + .generateList(path, stripingPolicy), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + public Map getDefaultStripingPolicy( + InetSocketAddress server, String path, String 
authString) + throws Exception { + + RPCResponse> r = null; + try { + r = sendRPC(server, "getDefaultStripingPolicy", RPCClient + .generateList(path), authString, null); + return r.get(); + } finally { + if (r != null) + r.freeBuffers(); + } + } + + public long getProtocolVersion(InetSocketAddress server, List versions, + String authString) throws Exception { + + RPCResponse r = null; + try { + r = sendRPC(server, "getDefaultStripingPolicy", RPCClient + .generateList(versions), authString, null); + return r.get(); + } finally { + if (r != null) + r.freeBuffers(); + } + } + + private static HTTPHeaders toHTTPHeaders(Map hdrs) { + + HTTPHeaders headers = new HTTPHeaders(); + for (String key : hdrs.keySet()) + headers.addHeader(key, hdrs.get(key)); + + return headers; + } + + private static Map toXCapMap(HTTPHeaders hdrs) { + + Map map = new HashMap(); + + if (hdrs.getHeader(HTTPHeaders.HDR_XCAPABILITY) != null) + map.put(HTTPHeaders.HDR_XCAPABILITY, hdrs + .getHeader(HTTPHeaders.HDR_XCAPABILITY)); + if (hdrs.getHeader(HTTPHeaders.HDR_XLOCATIONS) != null) + map.put(HTTPHeaders.HDR_XLOCATIONS, hdrs + .getHeader(HTTPHeaders.HDR_XLOCATIONS)); + + return map; + } + +} diff --git a/servers/src/org/xtreemfs/common/clients/mrc/MRCClientInterface.java b/servers/src/org/xtreemfs/common/clients/mrc/MRCClientInterface.java new file mode 100644 index 0000000000000000000000000000000000000000..6366feac77f48b29783f72222b48f064b3247b7e --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/mrc/MRCClientInterface.java @@ -0,0 +1,718 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.common.clients.mrc; + +import java.util.List; +import java.util.Map; + +import org.xtreemfs.mrc.MRCRequest; +import org.xtreemfs.mrc.brain.BrainException; +import org.xtreemfs.mrc.brain.UserException; + +/** + * The interface to the MRC backend. + * + * @author stender, bjko + * + */ +public interface MRCClientInterface { + + /** + * Sets up a new file system. The following steps are taken: + *
    + *
  1. all local volumes are deregistered from the directory service + *
  2. the database containing local metadata is stopped and all contents + * are deleted + *
  3. an empty database for local metadata is started, representing an + * empty file system + *
+ * + * @throws BrainException + * if the deregistration at the Directory Service of existing + * volumes failed or if an error occured in the storage backend + */ + public void initFileSystem() throws BrainException; + + /** + * Locally creates a new volume with the default OSD, striping and file + * access policy without an ACL and registers the volume at the Directory + * Service.
This method is equivalent to + * createVolume(volumeName, null, userId, SimpleSelectionPolicy.POLICY_ID). + * + * @param volumeName + * the name for the new volume + * @param userId + * the user id + * @throws UserException + * if the volume already exists + * @throws BrainException + * if an error occured in the storage backend + * @see #createVolume(String, Map, long, long, long, long) + */ + public void createVolume(String volumeName, long userId) + throws BrainException, UserException; + + /** + * Locally creates a new volume and registers the volume at the Directory + * Service.
The ACL is provided as described in + * setVolumeACL(...). + * + * + * @param volumeName + * the name for the new volume + * @param volumeACL + * the ACL for the volume + * @param userId + * the user id + * @param osdPolicyId + * the id of the OSD policy to use with this volume + * @param stripingPolicyId + * the id of the default striping policy used for files stored in + * the volume + * @param fileAccessPolicyId + * the id of the access policy used for files in the volume + * @throws UserException + * if the volume already exists + * @throws BrainException + * if an error occured in the storage backend + * @see #setVolumeACL(String, Map) + */ + public void createVolume(MRCRequest request, String volumeName, + long osdPolicyId, long stripingPolicyId, long fileAccessPolicyId, + long uid, Map volumeACL) throws BrainException, + UserException; + + /** + * Sets an ACL for the volume with the given name.
The ACL is provided + * as an access control list of the form {user:long=rights:long, user2=..., + * ...}. + * + *
    + *
- rights: the rights which the user has on the file:
 + * rights & 1 checks for read access,
 + * rights & 2 checks for write access, and
 + * rights & 4 checks for execution access.
+ * + * @param volumeName + * the name of the volume + * @param volumeACL + * the ACL + * @throws UserException + * if the volume is invalid or the local MRC is not responsible + * for the volume + * @throws BrainException + * if an error occured in the storage backend + * + * @see #setVolumeACL(String, Map) + */ + public void setVolumeACL(String volumeName, Map volumeACL) + throws BrainException, UserException; + + /** + * Returns the ACL of the volume with the given name.
The ACL is + * provided as described in {@link #setVolumeACL(String, Map)}. + * + * @param volumeName + * the name of the volume + * @return the ACL of the volume + * @throws UserException + * if the volume is invalid or the local MRC is not responsible + * for the volume + * @throws BrainException + * if an error occured in the storage backend + * + * @see #setVolumeACL(String, Map) + */ + public Map getVolumeACL(String volumeName) + throws BrainException, UserException; + + /** + * Deletes an existing volume held by the local MRC. All associated + * directories and files are removed as well. + * + * @param name + * the name of the volume to remove + * @throws UserException + * if the volume is invalid or the local MRC is not responsible + * for the volume + * @throws BrainException + * if an error occured in the storage backend + */ + public void deleteVolume(String name) throws BrainException, UserException; + + /** + * Creates a new file without user attributes and striping policy. This + * method is equivalent to createFile(path, null, userId). + * + * @param path + * the path to the file + * @param userId + * the id of the user on behalf of whom the file is created + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + * @see #createFile(String, Map, long, long) + */ + public void createFile(String path, long userId) throws BrainException, + UserException; + + /** + * Creates a new file. + * + * @param path + * the path to the file + * @param attrs + * a map containing the file attributes as (key/value) pairs + * @param stripingPolicyId + * the id of the striping policy used with this file. If + * 0 is specified, the volume striping policy will + * be used. 
+ * @param userId + * the id of the user on behalf of whom the file is created + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public void createFile(String path, Map attrs, + long stripingPolicyId, long userId, Map acl) + throws BrainException, UserException; + + /** + * Adds a user attribute to an existing file. If the attribute already + * exists for the given user, it will be overwritten. + * + * @param path + * the path to the file + * @param key + * the attribute key + * @param value + * the attribute value + * @param userId + * the user id associated with the attribute. If 0 + * is provided, the attribute will be regarded as global, i.e. it + * will be visible to any user. + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public void addUserAttribute(String path, String key, String value, + long userId) throws BrainException, UserException; + + /** + * Adds multiple user attributes to an existing file. If the attribute + * already exists for the given user, it will be overwritten. + * + * @param path + * the path to the file + * @param attrs + * a map containing the file attributes as (key/value) pairs + * @param userId + * the user id associated with the attributes. If 0 + * is provided, the attributes will be regarded as global, i.e. + * they will be visible to any user. + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public void addUserAttributes(String path, Map attrs, + long userId) throws BrainException, UserException; + + /** + * Assigns a new replica to an existing file. Each replica of a file + * represents the entire file content. 
Since different replicas may be + * striped over multiple OSDs in different ways, each replica is described + * by a string containing striping information. The striping information + * string will only be stored but not evaluated by the MRC. + * + * @param globalFileId + * the global ID of the file in the form of "volumeId":"fileId" + * @param stripingInfo + * an opaque string containing striping information about the + * replica + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend if an error + * occured in t + */ + public void addReplica(String globalFileId, String stripingInfo) + throws BrainException, UserException; + + /** + * Removes a user attribute from an existing file. + * + * @param path + * the path to the file + * @param key + * the key of the attribute + * @param userId + * the id of the user who defined the attribute, or + * 0 for a global attribute + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public void removeUserAttribute(String path, String key, long userId) + throws BrainException, UserException; + + /** + * Removes multiple user attributes from an existing file. + * + * @param path + * the path to the file + * @param attrKeys + * a list containing all keys of the attribute + * @param userId + * the id of the user who defined the attributes, or + * 0 for a global attribute + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public void removeUserAttributes(String path, List attrKeys, + long userId) throws BrainException, UserException; + + /** + * Returns a map containing all user-defined attribute/value pairs of a + * file. 
In case of a directory, null will be returned. + * + * @param path + * the path to the file + * @param userId + * the user id associated with the attributes + * @return a map containing the attributes + * + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public Map getUserAttributes(String path, long userId) + throws BrainException, UserException; + + /** + * Deletes a file or directory including all user attributes. In case of a + * directory, the directory is required to be empty, i.e. it must neither + * contain files nor subdirectories. + * + * @param path + * the path to the file + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public void delete(String path) throws BrainException, UserException; + + /** + * Creates a new directory without user attributes. The operation will fail + * unless the first n-1 of n components in path refer to an + * existing directory. This method is equivalent to + * createDir(path, null, userId). + * + * @param path + * complete path including the volume name + * @param userId + * the id of the user on behalf of whom the directory is created + * @throws UserException + * if the parent path does not exist or the local MRC is not + * responsible for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public void createDir(String path, long userId) throws BrainException, + UserException; + + /** + * Creates a new directory with user attributes. The operation will fail + * unless the first n-1 of n components in path refer to an + * existing directory. 
+ * + * @param path + * complete path including the volume name + * @param attrs + * a map containing the directory attributes as (key/value) pairs + * @param userId + * the id of the user on behalf of whom the directory is created + * @throws UserException + * if the parent path does not exist or the local MRC is not + * responsible for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public void createDir(String path, Map attrs, long userId) + throws BrainException, UserException; + + /** + * Lists the contents of a directory. Note that no guarantees are given + * about the order in which elements are listed. + * + * @param path + * the complete path including the volume + * @return a list of strings of the subdirectorie and files in the directory + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public List readDir(String path) throws BrainException, + UserException; + + /** + * Returns the result of a 'readdir' combined with a 'stat' for each + * directory entry. It is returned in the form of a map which maps the entry + * names to maps containing the stat infos as provided by the 'stat' method. + * + * @param path + * the directory of which the contents are returned + * @param userId + * the id of the user on behalf of whom the stat is returned. + * This is necessary in order to properly translate the POSIX + * access rights. + * @return a list of stats for the directory contents + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public Map> readDirAndStat(String path, + long userId) throws BrainException, UserException; + + /** + * Returns information about a single file or directory. The data returned + * has the following shape:

+ * + * {volId=long, sliceId=long, fileId=long, type=int, userId=long, size=long, + * atime=long, mtime=long, ctime=long, posixAccessMode=int}. + * + *
    + *
  • volId: the id of the volume holding the file or + * directory + *
  • sliceId: the id of the slice holding the file or + * directory + *
  • fileId: the id of the file or directory + *
  • type: an integer between 0 and 2 describing the type + * (0=directory, 1=file, 2=symlink) + *
  • userId: the user id of the file owner + *
  • size: the file size + *
  • atime: the access timestamp + *
  • mtime: the modification timestamp + *
  • ctime: the change timestamp + *
  • posixAccessMode: the posix access rights (rwx) for + * the owner, the VO and the rest + *
+ * + * @param path + * the path of the file in the file system + * @param userId + * the id of the user on behalf of whom the stat is returned. + * This is necessary in order to properly translate the POSIX + * access rights. + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + * @return the stats for the file + */ + public Map stat(String path, long userId) + throws BrainException, UserException; + + /** + * Creates a symbolic link to the given target.
The link itself + * behaves like an independent file with its own metadata. When file + * contents are read, however, the read request will be redirected to the + * given target path. No guarantees are given that the target path is valid, + * nor will the softlink be updated when the referenced file is moved or + * renamed. + * + * @param linkPath + * the path for the link itself + * @param targetPath + * the path to the link's target + * @param userId + * the id of the user on behalf of whom the file is created + * @throws UserException + * if the link path is invalid or the local MRC is not + * responsible for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public void createSymLink(String linkPath, String targetPath, long userId) + throws BrainException, UserException; + + /** + * Returns the path to which the symbolic link referenced by the given path + * points to. + * + * @param path + * the path to the symbolic link + * @return the path which the symbolic link points to + * @throws UserException + * if the path does not point to a symbolic link or the local + * MRC is not responsible for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public String getSymlinkTarget(String path) throws BrainException, + UserException; + + /** + * Opens an existing file. + * + *

+ * If the capability is successfully issued, a map of the following form + * will be returned:

+ * + * result = {storageLocs:StorageLocList, stripingPolicy:long, + * capablity:string}.
storageLocs = [feasibleHost_1:string, ... , + * feasibleHost_n:string] + * + *

    + *
  • storageLocs: a list of strings 'hostname:port' + * describing the locations of feasible OSDs + *
  • stripingPolicy: the id of the striping policy used + * with the given path + *
  • capability: the string containing the encrypted + * capability + *
+ *

+ * + *
In case the capability could not be issued, null is + * returned. + * + * @param path + * the path for which to generate the capability + * @param accessMode + * the access for the file/directory. Possible attributes are + * "rwx". + * @param userId + * the id of the user on behalf of whom the capability is issued + * @return a map of the form described above + * + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public Map open(String path, String accessMode, long userId) + throws BrainException, UserException; + + /** + * Checks whether the given path refers to a directory. + * + * @param path + * the path + * @return true if the path refers to a directory, + * false otherwise + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public boolean isDir(String path) throws BrainException, UserException; + + /** + * Checks whether the given path refers to a symbolic link. + * + * @param path + * the path + * @return true if the path refers to a symbolic link, + * false otherwise + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public boolean isSymLink(String path) throws BrainException, UserException; + + /** + * Moves the file or directory referenced by the source path to the given + * target path. + *

+ * The behavior of this method depends on what both paths are pointing to. + * The source path must point to a valid file or directory which is managed + * by the local MRC. + * + * The behavior is a follows: + *

    + *
  • source points to a file: + *
  • + *
      + *
    • target is a file or does not exist: the source file will be moved to + * the target's parent directory where the old file (if exists) is removed + *
    • target is a directory: the source file will be moved to the target + * directory + *
    + *
  • source points to a directory: + *
  • + *
      + *
    • target is a file: an exception is thrown + *
    • target is a directory: the source directory tree will be moved to + * the target directory + *
    • target does not exist: the source directory will be moved to the + * target's parent directory and renamed + *
    + *
+ *

+ * + * @param sourcePath + * the path pointing to the source file or directory + * @param targetPath + * the path pointing to the target file or directory + * @throws UserException + * if the source or target path is invalid or the local MRC is + * not responsible for the source path + * @throws BrainException + * if an error occured in the storage backend + */ + public void move(String sourcePath, String targetPath) + throws BrainException, UserException; + + /** + * Submits a query. A list of files matching the given query string is + * returned in the form of path names. + * + * @param path + * the path from which the query is executed. Query results will + * be restricted to paths that are contained by the given path. + * @param queryString + * a string representing the query + * @param userId + * the id of the user on behalf of whom the query is executed + * @return if the path or query string is invalid or the local MRC is not + * responsible for the path or an I/O error occured + * @throws UserException + * if the query is invalid + * @throws BrainException + * if an error occured in the storage backend + */ + public List query(String path, String queryString, long userId) + throws BrainException, UserException; + + /** + * Sets the size of a file. + * + * @param path + * the path of the file + * @param fileSize + * the new size of the file + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public void updateFileSize(String path, long fileSize) + throws BrainException, UserException; + + /** + * Terminates the Brain instance. All connections to remote hosts will be + * closed, and unconfirmed writes will be flushed to disk. 
+ * + * @throws BrainException + * if an error occured in the storage backend + */ + public void shutdown() throws BrainException; + + /** + * Returns the list containing striping information about replicas for the + * file with the given path. + * + * @param globalFileId + * the global ID of the file in the form of "volumeId":"fileId" + * @return a list of strings containing striping information + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + * @throws BrainException + * if an error occured in the storage backend + */ + public List getReplicas(String globalFileId) throws BrainException, + UserException; + + /** + * Locally creates a file tree from the given tree data. The tree is + * inserted at the given target path.
The purpose of this method is to + * allow remote MRC instances to transfer directory trees to the local file + * system, e.g.\ in connection with a 'move' operation. + * + * The tree data is provided as follows: + * + * treeData:TreeData = [fileData:FileData, attrs:AttributeList, + * osdData:OSDEndpointList, stripingPolicyId: long, ref:string, + * subElements:TreeData]

FileData = {name:string, atime:long, + * ctime:long, mtime:long, size:long, userId:long, isDirectory:boolean} + *

AttributeList = [{key:string, value:string, type:long, + * userId:long}, {...}]

OSDEndpointList = [endpoint1:string, + * ...] + * + * @param treeData + * the data representing the subtree to add + * @param targetPath + * the path where to add the subtree + * @throws BrainException + * if an error occured in the storage backend + * @throws UserException + * if the path is invalid or the local MRC is not responsible + * for the path + */ + public void createFileTree(List treeData, String targetPath) + throws BrainException, UserException; + + // -- MONITORING ROUTINES + + public Map getPerVolumeOSDs(); + + /** + * Returns a map of volumes held by the local MRCs. The result is returned + * in the form of a mapping from volume ids to volume names. + * + * @return a map volumeId -> volumeName of all volumes on the local server + */ + public Map getLocalVolumes() throws Exception; + +} diff --git a/servers/src/org/xtreemfs/common/clients/mrc/XtreemFile.java b/servers/src/org/xtreemfs/common/clients/mrc/XtreemFile.java new file mode 100644 index 0000000000000000000000000000000000000000..47084836d777a9a172b966025bd189042b200273 --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/mrc/XtreemFile.java @@ -0,0 +1,441 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.common.clients.mrc; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.mrc.utils.MessageUtils; + +/** + * + * @author bjko + */ +public class XtreemFile { + + public static String pathSeparator = "/"; + + public static char pathSeparatorChar = '/'; + + private final String volumeName; + + private final String filename; + + private Map statInfo; + + private MRCClient client; + + private InetSocketAddress mrc; + + private final boolean isVolumeList; + + private final boolean invalidVolume; + + private final boolean isSysDir; + + private static final HashMap vcache = new HashMap(); + + /** Creates a new instance of XtreemFile */ + public XtreemFile(MRCClient client, InetSocketAddress dirService, String filename) + throws IOException { + System.out.println("created new file for: " + filename); + this.client = client; + // first extract the volume name + String woPrefix = filename; + if (filename.startsWith("/xtreemfs")) { + woPrefix = filename.substring("/xtreemfs".length()); + } + if (woPrefix.length() == 0) + woPrefix = "/"; + int posSecondslash = woPrefix.substring(1).indexOf(pathSeparatorChar); + if (posSecondslash == -1) { + volumeName = woPrefix.substring(1); + this.filename = "/"; + } else { + volumeName = woPrefix.substring(1, posSecondslash + 1); + this.filename = woPrefix.substring(posSecondslash + 1); + } + System.out.println("XtreemFile: voumeName=" + volumeName + " filename=" + this.filename); + + if 
(volumeName.length() > 0) { + // ask the dir service for the MRC holding the volume + + // check, if it is a system dir + if (woPrefix.endsWith("/.") || woPrefix.endsWith("/..")) { + isSysDir = true; + statInfo = null; + isVolumeList = false; + invalidVolume = false; + } else { + // check my cache + VolCacheEntry vci = vcache.get(this.volumeName); + if (vci != null) { + if (vci.created > System.currentTimeMillis() + 1000 * 60) { + vci = null; + } + } + if (vci == null) { + ArrayList params = new ArrayList(); + params.add(volumeName); + Object o = null; + // try { + // RPCResponse resp = + // client.sendGenericRequest(dirService,"getVolumeInfo",params); + // o = resp.get(); + // } catch (JSONException ex) { + // throw new IOException("cannot encode/decode message",ex); + // } + // FIXME: adapt to new Directory Service + System.out.println("VVOLINFO is " + o); + if (o == null) { + invalidVolume = true; + } else { + Map volInfo = (Map) o; + Map mrcMap = (Map) volInfo.get("mrcMap"); + mrc = MessageUtils.addrFromString((String) mrcMap.keySet().toArray()[0]); + vci = new VolCacheEntry(); + vci.created = System.currentTimeMillis(); + vci.volName = this.volumeName; + vci.mrc = mrc; + vcache.put(this.volumeName, vci); + invalidVolume = false; + } + } else { + mrc = vci.mrc; + invalidVolume = false; + } + if (!invalidVolume) { + try { + + // now we have a mrc..lets fetch the file details + statInfo = client.stat(mrc, this.volumeName + this.filename, true, true, + true, NullAuthProvider.createAuthString("1", "1")); + System.out.println("STAT INFO:" + statInfo); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + statInfo = null; + } /* + * catch (IOException ex) { ex.printStackTrace(); + * statInfo = null; } + */ + } + isVolumeList = false; + isSysDir = false; + } + } else { + isSysDir = false; + isVolumeList = true; + invalidVolume = false; + mrc = dirService; + } + } + + public Map getStatInfo() { + return statInfo; + } + + public boolean 
isDirectory() { + if (isSysDir) + return true; + if (isVolumeList) + return true; + if (statInfo == null) { + System.out.println("no stat info"); + return false; + } + Long oType = (Long) statInfo.get("objType"); + System.out.println("isDir= " + (oType == 2) + " type=" + oType); + return (oType == 2); + } + + public boolean isFile() { + if (isSysDir) + return false; + if (isVolumeList) + return false; + if (statInfo == null) { + System.out.println("no stat info"); + return false; + } + Long oType = (Long) statInfo.get("objType"); + System.out.println("isFile= " + (oType == 1) + " type=" + oType); + return (oType == 1); + } + + public boolean delete() { + if (isSysDir) + return false; + if (isVolumeList) + return false; + try { + client.delete(mrc, this.volumeName + this.filename, NullAuthProvider.createAuthString( + "1", "1")); + return true; + } catch (Exception ex) { + return false; + } + } + + public long length() { + if (isSysDir) + return 0l; + if (isVolumeList) + return 0l; + if (statInfo == null) + return 0l; + return (Long) statInfo.get("size"); + } + + public String toString() { + return this.volumeName + this.filename; + } + + public boolean exists() { + if (isSysDir) + return true; + if (isVolumeList && invalidVolume) + return false; + if (isVolumeList) + return true; + return (statInfo != null); + } + + public boolean renameTo(XtreemFile dest) { + if (isSysDir) + return false; + if (isVolumeList) + return false; + if (statInfo == null) + return false; + + try { + client.move(mrc, this.volumeName + this.filename, dest.volumeName + dest.filename, + NullAuthProvider.createAuthString("1", "1")); + return true; + } catch (Exception ex) { + return false; + } + } + + public boolean canExecute() { + if (isSysDir) + return false; + if (isVolumeList) + return false; + if (statInfo == null) + return false; + Long posixAccessMode = (Long) statInfo.get("posixAccessMode"); + return (posixAccessMode.intValue() & 64) > 0; + } + + public boolean canRead() { + if 
(isSysDir) + return true; + if (isVolumeList) + return true; + if (statInfo == null) + return false; + Long posixAccessMode = (Long) statInfo.get("posixAccessMode"); + return (posixAccessMode.intValue() & 256) > 0; + } + + public boolean canWrite() { + if (isSysDir) + return false; + if (isVolumeList) + return false; + if (statInfo == null) + return false; + Long posixAccessMode = (Long) statInfo.get("posixAccessMode"); + return (posixAccessMode.intValue() & 128) > 0; + } + + public long lastModified() { + if (isSysDir) + return System.currentTimeMillis(); + if (isVolumeList) + return 0l; + if (statInfo == null) + return 0l; + Long ll = (Long) statInfo.get("mtime") * 1000; + return ll; + } + + public String[] list() { + if (isSysDir) + return null; + if (isVolumeList) { + // list volumes... + Object o = null; + try { + RPCResponse resp = client.sendRPC(mrc, "getVolumeInfos", new ArrayList(), + NullAuthProvider.createAuthString("1", "1"), null); + o = resp.get(); + } catch (Exception ex) { + System.out.println("cannot get volumes: " + ex); + return null; + } + List vols = (List) o; + List volNames = new LinkedList(); + for (Object vol : vols) { + Map mrcMap = (Map) vol; + volNames.add((String) mrcMap.get("name")); + } + return volNames.toArray(new String[0]); + } else { + if (isDirectory() == false) + return null; + try { + + List entries = client.readDir(mrc, this.volumeName + this.filename, + NullAuthProvider.createAuthString("1", "1")); + return entries.toArray(new String[0]); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + return null; + } + } + } + + public MiniStatInfo[] listAndStat() { + if (isSysDir) + return null; + if (isVolumeList) { + // list volumes... 
+ Object o = null; + try { + RPCResponse resp = client.sendRPC(mrc, "getVolumeInfos", new ArrayList(), + NullAuthProvider.createAuthString("1", "1"), null); + o = resp.get(); + } catch (Exception ex) { + System.out.println("cannot get volumes: " + ex); + return null; + } + List vols = (List) o; + List volNames = new LinkedList(); + for (Object vol : vols) { + Map mrcMap = (Map) vol; + MiniStatInfo mi = new MiniStatInfo(); + mi.type = "vol"; + mi.name = (String) mrcMap.get("name"); + volNames.add(mi); + } + return volNames.toArray(new MiniStatInfo[0]); + } else { + if (isDirectory() == false) + return null; + try { + + Map> entries = client.readDirAndStat(mrc, + this.volumeName + this.filename, NullAuthProvider.createAuthString("1", "1")); + List dir = new LinkedList(); + for (String entry : entries.keySet()) { + MiniStatInfo mi = new MiniStatInfo(); + Long otype = (Long) entries.get(entry).get("objType"); + if (otype == 1) + mi.type = "file"; + else + mi.type = "dir"; + mi.name = entry; + dir.add(mi); + } + return dir.toArray(new MiniStatInfo[0]); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + return null; + } + } + } + + public boolean mkdir() { + if (isSysDir) + return false; + if (isVolumeList) + return false; + try { + client.createDir(mrc, this.volumeName + this.filename, NullAuthProvider + .createAuthString("1", "1")); + return true; + } catch (Exception ex) { + return false; + } + } + + public boolean touch() { + if (isSysDir) + return false; + if (isVolumeList) + return false; + try { + client.createFile(mrc, this.volumeName + this.filename, NullAuthProvider + .createAuthString("1", "1")); + return true; + } catch (Exception ex) { + return false; + } + } + + public byte[] read(long start, long numBytes) throws IOException { + try { + // FIXME:not finished yet + Map capability = client.open(mrc, this.volumeName + this.filename, "r", + NullAuthProvider.createAuthString("1", "1")); + } catch (Exception ex) { + throw new 
IOException(ex); + } + + // OSDClient oc = new OSDClient(); + + return new byte[0]; + } + + public boolean write(long start, long numBytes) { + return true; + } + + public static class VolCacheEntry { + public long created; + + public String volName; + + public InetSocketAddress mrc; + } + + public static class MiniStatInfo { + public String type; + + public String name; + } + +} diff --git a/servers/src/org/xtreemfs/common/clients/osd/ConcurrentFileMap.java b/servers/src/org/xtreemfs/common/clients/osd/ConcurrentFileMap.java new file mode 100644 index 0000000000000000000000000000000000000000..2239e3e1e02ed4068075c7841c620cc717779d26 --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/osd/ConcurrentFileMap.java @@ -0,0 +1,466 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion + and Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHOR: Felix Langner (ZIB) + */ +package org.xtreemfs.common.clients.osd; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +/** + *

This class models a {@link Thread} safe FileMap for listing all available files of an OSD for + * service purpose like cleaning deleted files up.

+ * + *

The first key is the volume as a {@link List} of volumeID, mrcAddress and mrcPort, the second + * key is the fileID and the third key some file attributes like fileSize and preview.

+ * + * @author langner + */ +public final class ConcurrentFileMap { + private static final long serialVersionUID = -7736474666790682726L; + + Map>> map = new ConcurrentHashMap>>(); + + /** + * Default constructor. + */ + public ConcurrentFileMap() { + super(); + } + + /** + * Parses a JSON response into a {@link ConcurrentFileMap}. + * @param map + */ + public ConcurrentFileMap(Map>> map) { + super(); + for (String key : map.keySet()){ + this.map.put(Volume.parse(key) , map.get(key)); + } + } + + /** + *

Inserts a volumeID,fileID pair given by a directory's name into the fileMap. + * And adds the attributes size and preview to the entry.

+ * + * @param directory hex name (volumeID:fileID) + * @param size + * @param preview + */ + public synchronized void insert(String directory, long size, String preview, long maxObjectSize) throws IOException{ + String[] fileDesc = directory.split(":"); + if (fileDesc.length==2){ + try{ + Integer.parseInt(fileDesc[1]); + + Volume newVol = new Volume (fileDesc[0]); + + if (fileDesc.length!=2) throw new IOException("Directory: '"+directory+"' has an illegal format!"); + + if(containsKey(newVol)) + addFile(directory,size,maxObjectSize,preview,(Map>) get(newVol)); + else + map.put(newVol, newVolFile(directory,size,maxObjectSize,preview)); + }catch(NumberFormatException ne){ + // ignore + } + } + } + + /** + * + * @return set of unresolved volumeIDs. + */ + public Set unresolvedVolumeIDSet() { + Set result = new HashSet(); + + Set keys = map.keySet(); + for(Volume key : keys){ + if (key.size()==1) result.add(key.get(0)); + } + return result; + } + + /** + * @return set of resolved volumeIDs. + */ + public Set resolvedVolumeIDSet() { + Set result = new HashSet(); + + Set keys = map.keySet(); + for(Volume key : keys){ + if (key.size()>1 || key.equals(Volume.NOT_AVAILABLE)) result.add(key.get(0)); + } + return result; + } + + /** + * @return set of resolved volumeIDs without not available volume. + */ + public Set volumeIDSetForRequest() { + Set result = new HashSet(); + + Set keys = map.keySet(); + for(Volume key : keys){ + if (key.size()>1 && !key.equals(Volume.NOT_AVAILABLE)) result.add(key.get(0)); + } + return result; + } + + /** + *

Replaces the entry with the given volumeID with a new one with address.

+ * + *

If address is null volume is marked as 'unknown'. + * + * @param volumeID + * @param address + */ + public synchronized void saveAddress(String volumeID, InetSocketAddress address) { + if (address!=null) + map.put(new Volume(volumeID,address.getHostName(),((Integer) address.getPort()).toString()), remove(new Volume(volumeID))); + else{ + if (containsKey(Volume.NOT_AVAILABLE)) + ((Map>) get(Volume.NOT_AVAILABLE)).putAll((Map>) remove(new Volume(volumeID))); + else + map.put(Volume.NOT_AVAILABLE, remove(new Volume(volumeID))); + } + } + + /** + * + * @param volumeID + * @return the address for the given volumeID, or null if not available. + */ + public InetSocketAddress getAddress(String volumeID) { + Volume predicate = new Volume(volumeID); + + Set keys = map.keySet(); + for (Volume key : keys){ + if (predicate.equals(Volume.NOT_AVAILABLE)) + return null; + else if (key.equals(predicate)) + return new InetSocketAddress(key.get(1),Integer.parseInt(key.get(2))); + } + + return null; + } + + /** + * + * @param volume + * @return a {@link Set} of fileIDs for the given volume. + */ + public Set getFileNumberSet(List volume) { + Set result = new HashSet(); + for (String fID : getFileIDSet(volume)){ + result.add(fID.substring(fID.indexOf(":")+1, fID.length())); + } + return result; + } + + /** + * + * @param volume + * @return a {@link Set} of fileIDs for the given volumeID. + */ + public Set getFileNumberSet(String volumeID) { + Set result = new HashSet(); + for (String fID : getFileIDSet(volumeID)){ + result.add(fID.substring(fID.indexOf(":")+1, fID.length())); + } + return result; + } + + /** + * + * @param volumeID + * @return a {@link List} of fileNumbers for the given volumeID. 
+ */ + public List getFileNumbers(String volumeID) { + List result = new LinkedList(); + for (String fID : getFileIDs(volumeID)){ + result.add(fID.substring(fID.indexOf(":")+1, fID.length())); + } + return result; + } + + /** + * + * @param volume + * @return a {@link Set} of fileIDs for the given volume. + */ + public Set getFileIDSet(List volume) { + return ((Map>) get(volume)).keySet(); + } + + /** + * + * @param volumeID + * @return a {@link Set} of fileIDs for the given volumeID. + */ + public Set getFileIDSet(String volumeID) { + return ((Map>) get(volumeID)).keySet(); + } + + /** + * + * @return the fileMap JSON compatible. + */ + public Map>> getJSONCompatible (){ + Map>> result = new ConcurrentHashMap>>(); + for (Volume key : map.keySet()){ + result.put(key.toString(), get(key)); + } + return result; + } + + /** + * Removes a file given by volumeID and fileID from the fileMap. + * @param volumeID + * @param fileID + */ + public void remove(String volumeID, String fileID) { + ((Map>) get(new Volume(volumeID))).remove(fileID); + + } + + /** + * + * @return the number of fileIDs in the fileMap. 
+ */ + public synchronized int size(){ + int result = 0; + + for (Volume key : ((Set) map.keySet())){ + result += ((Map>) get(key)).size(); + } + + return result; + } + +/* + * getter + */ + public Long getFileSize(String volumeID, String file) { + return Long.valueOf(get(volumeID).get(file).get("size")); + } + + public Long getFileSize(List volume, String file) { + return Long.valueOf(get(volume).get(file).get("size")); + } + + public String getFilePreview(List volume, String file) { + return get(volume).get(file).get("preview"); + } + + public Long getObjectSize(List volume, String file) { + return Long.valueOf(get(volume).get(file).get("objectSize")); + } + +/* + * override + */ + + public Set> keySetList() { + Set> result = new HashSet>(); + for (Volume v: map.keySet()){ + result.add(v); + } + + return result; + } + + public boolean containsKey(Object key) { + for (Volume thisKey : map.keySet()){ + if(thisKey.equals(key)){ + return true; + } + } + return false; + } + + public Map> remove(Object key) { + Volume rq = null; + for (Volume thisKey : map.keySet()){ + if(thisKey.get(0).equals(key) || thisKey.equals(key)){ + rq = thisKey; + break; + } + } + return map.remove(rq); + } + + public Map> get(Object key) { + for (Volume thisKey : map.keySet()){ + if(thisKey.get(0).equals(key) || thisKey.equals(key)){ + key = thisKey; + break; + } + } + return map.get(key); + } + + /** + * + * @return true, if there are any fileIDs saved in the map.false otherwise. + */ + public boolean isEmpty() { + if (!map.isEmpty()){ + boolean isEmpty = true; + for (Map> value : map.values()) + isEmpty &= value.isEmpty(); + return isEmpty; + } + return true; + } +/* + * private methods + */ + + /** + * + * @param volumeID + * @return a {@link List} of fileIDs for the given volumeID. 
+ */ + private List getFileIDs(String volumeID) { + List result = new LinkedList(); + for (String fID : getFileIDSet(volumeID)) + result.add(fID); + + return result; + } + + /** + * + * @param size + * @param objectSize + * @param preview + * @return a new Map with the given file details in it. + */ + private Map fileDetails (Long size,Long objectSize,String preview){ + ConcurrentHashMap details = new ConcurrentHashMap(); + details.put("size", size.toString()); + details.put("objectSize", objectSize.toString()); + details.put("preview", preview); + + return details; + } + + /** + * + * @param fileID + * @param size + * @param preview + * @param objectSize + * @return a new Map with the fileID with the given details in it. + */ + private Map> newVolFile (String fileID,long size,long objectSize,String preview){ + Map> volFile = new ConcurrentHashMap>(); + + volFile.put(fileID, fileDetails(size,objectSize,preview)); + + return volFile; + } + + /** + * Put the fileID and the file details into the given map. + * + * @param fileID + * @param size + * @param preview + * @param objectSize + * @param map + */ + private void addFile(String fileID,long size,long objectSize,String preview, Map> map){ + map.put(fileID, fileDetails (size,objectSize,preview)); + } +} + + /** + *

Volume is a {@link List} of volumeID, mrcAddress and mrcPort.

+ *

It will just be compared by the first value in the List (the volumeID).

+ * + * @author langner + * + */ + class Volume extends LinkedList implements List{ + private static final long serialVersionUID = 7408578018651016089L; + + public Volume(String volID) { + super(); + add(volID); + } + + public Volume(String volID, String mrcAddress, String mrcPort) { + super(); + add(volID); + add(mrcAddress); + add(mrcPort); + } + + private Volume(){ + super(); + add("unknown"); + add("unknown"); + add("unknown"); + } + + @Override + public boolean equals(Object o) { + if (o instanceof String){ + return get(0).equals(o); + }else if (o instanceof Volume){ + return get(0).equals(((Volume) o).get(0)); + } + return false; + } + + public static Volume NOT_AVAILABLE = new Volume(); + + public static Volume parse(String key) { + Volume result = null; + String[] values = key.split(","); + for (int i=0;i for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jesús Malo (BSC), Jan Stender (ZIB) + */ + +package org.xtreemfs.common.clients.osd; + +import java.io.IOException; +import java.net.InetSocketAddress; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.ClientLease; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.SSLOptions; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.osd.RPCTokens; + +/** + * + * @author bjko + */ +public class OSDClient extends RPCClient { + + /** Creates a new instance of NewOSDClient */ + public OSDClient() throws IOException { + super(); + } + + public OSDClient(MultiSpeedy sharedSpeedy) throws IOException { + super(sharedSpeedy); + } + + public OSDClient(MultiSpeedy sharedSpeedy, int timeout) throws IOException { + super(sharedSpeedy, timeout); + } + + public OSDClient(int timeout, SSLOptions sslOptions) throws IOException { + super(timeout, sslOptions); + } + + public RPCResponse get(InetSocketAddress osd, Locations loc, Capability cap, String file) + throws IOException, JSONException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + + return send(osd, file, null, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.GET_TOKEN); + } + + /** + * performs a GET of a range of bytes on an OSD + * + * @param loc + * Location of the files. 
+ * @param cap + * Capability of the request + * @param file + * File to use + * @param objectNumber + * Number of the object to use + * @param firstByte + * Offset relative to the object of the first requested byte + * @param lastByte + * Offset relative to the object of the last requested byte + * @return The response of the OSD + */ + public RPCResponse get(InetSocketAddress osd, Locations loc, Capability cap, String file, + long objectNumber, long firstByte, long lastByte) throws IOException, JSONException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber)); + headers.addHeader(HTTPHeaders.HDR_CONTENT_RANGE, "bytes " + Long.toString(firstByte) + "-" + + Long.toString(lastByte) + "/*"); + + return send(osd, file, null, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.GET_TOKEN); + } + + /** + * performs a GET of a range of bytes on an OSD + * + * @param loc + * Location of the files. 
+ * @param cap + * Capability of the request + * @param file + * File to use + * @param objectNumber + * Number of the object to use + * @param firstByte + * Offset relative to the object of the first requested byte + * @param lastByte + * Offset relative to the object of the last requested byte + * @return The response of the OSD + */ + public RPCResponse get(InetSocketAddress osd, Locations loc, Capability cap, String file, + long objectNumber, ClientLease lease) throws IOException, JSONException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber)); + headers.addHeader(HTTPHeaders.HDR_XLEASETO, Long.toString(lease.getExpires())); + + return send(osd, file, null, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.GET_TOKEN); + } + + /** + * performs a GET for an entire object on an OSD + * + * @param loc + * Location of the files. + * @param cap + * Capability of the request + * @param file + * File to use + * @param objectNumber + * Number of the object to use + * @return The response of the OSD + */ + public RPCResponse get(InetSocketAddress osd, Locations loc, Capability cap, String file, + long objectNumber) throws IOException, JSONException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber)); + + return send(osd, file, null, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.GET_TOKEN); + } + + /** + * It requests to the OSD to perform a PUT of a range of bytes + * + * @param loc + * Location of the files. 
+ * @param cap + * Capability of the request + * @param file + * File to use + * @param objectNumber + * Number of the object to use + * @param firstByte + * Offset relative to the object of the first byte to write + * @param data + * Data to write + * @return The response of the OSD + */ + public RPCResponse put(InetSocketAddress osd, Locations loc, Capability cap, String file, + long objectNumber, long firstByte, ReusableBuffer data) throws IOException, JSONException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber)); + headers.addHeader(HTTPHeaders.HDR_CONTENT_RANGE, "bytes " + Long.toString(firstByte) + "-" + + Long.toString(firstByte + data.capacity() - 1) + "/*"); + + return send(osd, file, data, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.PUT_TOKEN); + } + + /** + * It requests to the OSD to perform a PUT of a range of bytes + * + * @param loc + * Location of the files. 
+ * @param cap + * Capability of the request + * @param file + * File to use + * @param objectNumber + * Number of the object to use + * @param firstByte + * Offset relative to the object of the first byte to write + * @param data + * Data to write + * @return The response of the OSD + */ + public RPCResponse putWithForcedIncrement(InetSocketAddress osd, Locations loc, Capability cap, + String file, long objectNumber, long firstByte, ReusableBuffer data) throws IOException, + JSONException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber)); + headers.addHeader("X-Force-Increment", "yes"); + headers.addHeader(HTTPHeaders.HDR_CONTENT_RANGE, "bytes " + Long.toString(firstByte) + "-" + + Long.toString(firstByte + data.capacity() - 1) + "/*"); + + return send(osd, file, data, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.PUT_TOKEN); + } + + /** + * It requests to the OSD to perform a PUT of a whole object + * + * @param loc + * Location of the files. 
+ * @param cap + * Capability of the request + * @param file + * File to use + * @param objectNumber + * Number of the object to use + * @param data + * Data to write + * @return The response of the OSD + */ + public RPCResponse put(InetSocketAddress osd, Locations loc, Capability cap, String file, + long objectNumber, ReusableBuffer data) throws IOException, JSONException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber)); + + return send(osd, file, data, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.PUT_TOKEN); + } + + /** + * writes a full object onto an OSD + * + * @param loc + * Location of the files. + * @param cap + * Capability of the request + * @param file + * File to use + * @param objectNumber + * Number of the object to use + * @param data + * Data to write + * @return The response of the OSD + */ + public RPCResponse put(InetSocketAddress osd, Locations loc, Capability cap, String file, + long objectNumber, ReusableBuffer data, ClientLease lease) throws IOException, JSONException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber)); + headers.addHeader(HTTPHeaders.HDR_XLEASETO, Long.toString(lease.getExpires())); + + return send(osd, file, data, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.PUT_TOKEN); + } + + /** + * It requests to the OSD to perform a DELETE of a file + * + * @param loc + * Location of the files. If null is given, only the data in the + * OSD will be deleted, otherwise, the deletion will be in every + * OSD in loc. 
+ * @todo This specification will be changed for the new OSD + * @param cap + * Capability of the request + * @param file + * File to use + * @return The response of the OSD + */ + public RPCResponse delete(InetSocketAddress osd, Locations loc, Capability cap, String file) + throws IOException, JSONException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + + // @todo In the new OSD, loc cannot be null. This has been changed to + // deleteReplica + if (loc != null) + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + + return send(osd, file, null, headers, null, HTTPUtils.DATA_TYPE.BINARY, + HTTPUtils.DELETE_TOKEN); + } + + /** + * It requests to the OSD to perform a getFileSize of a file + * + * @param loc + * Location of the file. + * @param cap + * Capability of the request + * @param file + * File whose size is requested + * @param knownSize + * Current known size of the file + * @return The response of the OSD + */ + public RPCResponse globalMax(InetSocketAddress osd, Locations loc, Capability cap, String file) + throws IOException, JSONException { + + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + headers.addHeader(HTTPHeaders.HDR_XFILEID, file); + + return sendRPC(osd, RPCTokens.fetchGlobalMaxToken, null, null, headers); + } + + /** + * It requests to the OSD to perform a truncate of a file + * + * @param loc + * Location of the file. 
+ * @param cap + * Capability of the request + * @param file + * File whose size is requested + * @param finalSize + * Size of the file after truncate + * @param exclusion + * OSD for the X-Excluded-Location or null if no OSD is excluded + * @return The response of the OSD + */ + public RPCResponse truncate(InetSocketAddress osd, Locations loc, Capability cap, String file, + long finalSize) throws JSONException, IOException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + + ReusableBuffer data = ReusableBuffer.wrap(JSONParser.toJSON(file, Long.valueOf(finalSize)) + .getBytes(HTTPUtils.ENC_UTF8)); + + return sendRPC(osd, RPCTokens.truncateTOKEN, data, null, headers); + } + + /** + * It requests to delete a certain replica from the specified location + * + * @param cap + * Capability of the request + * @param fileID + * The fileID of the replica to be deleted. + * @return The response of the OSD + */ + public RPCResponse deleteReplica(InetSocketAddress osd, Capability cap, String fileID) + throws IOException, JSONException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XFILEID, fileID); + + return sendRPC(osd, RPCTokens.deleteLocalTOKEN, null, null, headers); + } + + public RPCResponse deleteReplica(InetSocketAddress osd, Capability cap, String fileID, + int timeout) throws IOException, JSONException { + RPCResponse r = deleteReplica(osd, cap, fileID); + r.getSpeedyRequest().timeout = timeout; + return r; + } + + /** + * It requests to delete a certain replica from the specified location + * + * @param cap + * Capability of the request + * @param file + * The fileID of the replica to be deleted. 
+ * @param newFileSize + * Size of the file after truncate + * @return The response of the OSD + */ + public RPCResponse truncateReplica(InetSocketAddress osd, Locations loc, Capability cap, + String file, Long newFileSize) throws IOException, JSONException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + + ReusableBuffer data = ReusableBuffer.wrap(JSONParser.toJSON(file, newFileSize).getBytes( + HTTPUtils.ENC_UTF8)); + + return sendRPC(osd, RPCTokens.truncateLocalTOKEN, data, null, headers); + } + + public RPCResponse> getStatistics(InetSocketAddress osd) throws IOException, JSONException { + return sendRPC(osd, RPCTokens.getstatsTOKEN, null, null, new HTTPHeaders()); + } + + /** + * Checks consistency of a given object and returns the object's file size. + * + * @param osd + * the OSD holding the object + * @param loc + * the X-Locations List of the file + * @param cap + * the capability issued by the MRC + * @param file + * the file ID + * @param objectNumber + * the object number + * @return the response of the OSD, which contains the size of the object in + * bytes if no error has occurred + * + * @throws IOException + * @throws JSONException + */ + public RPCResponse checkObject(InetSocketAddress osd, Locations loc, Capability cap, + String file, long objectNumber) throws IOException, JSONException { + + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + headers.addHeader(HTTPHeaders.HDR_XFILEID, file); + headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber)); + + return sendRPC(osd, RPCTokens.checkObjectTOKEN, null, null, headers); + } + + public RPCResponse> recordStageStats(InetSocketAddress osd, Boolean measureRqs, Boolean basicStats) throws IOException, 
JSONException { + + ReusableBuffer data = ReusableBuffer.wrap(JSONParser.toJSON(measureRqs,basicStats).getBytes( + HTTPUtils.ENC_UTF8)); + + return sendRPC(osd, RPCTokens.recordRqDurationTOKEN, data, null, new HTTPHeaders()); + } + + /** + * Acquires or renews a client lease. + * @param osd the osd from which the lease is requested + * @param lease the lease object (must contain a lease id for renewal) + * @return a list with a JSON-encoded client lease and a timestamp (see XtreemFS protocol for details) + * @throws java.io.IOException + * @throws org.xtreemfs.foundation.json.JSONException + */ + public RPCResponse>> acquireClientLease(InetSocketAddress osd, Locations loc, Capability cap, ClientLease lease) throws IOException, JSONException { + + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + headers.addHeader(HTTPHeaders.HDR_XFILEID, lease.getFileId()); + + List l = new ArrayList(1); + l.add(lease.encodeAsMap()); + + ReusableBuffer data = ReusableBuffer.wrap(JSONParser.writeJSON(l).getBytes( + HTTPUtils.ENC_UTF8)); + + return sendRPC(osd, RPCTokens.acquireLeaseTOKEN, data, null, headers); + } + + public RPCResponse returnLease(InetSocketAddress osd, Locations loc, Capability cap, ClientLease lease) throws IOException, JSONException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + headers.addHeader(HTTPHeaders.HDR_XFILEID, lease.getFileId()); + + List l = new ArrayList(1); + l.add(lease.encodeAsMap()); + + ReusableBuffer data = ReusableBuffer.wrap(JSONParser.writeJSON(l).getBytes( + HTTPUtils.ENC_UTF8)); + + return sendRPC(osd, RPCTokens.returnLeaseTOKEN, data, null, headers); + } + + /** + * TODO authenticate the user, to ensure that he has the right capabilities. 
+ * + * @param osd + * @param authString + * @return a List of fileIDs from potential zombies. + * @throws IOException + * @throws JSONException + * @throws InterruptedException + */ + public RPCResponse>>> cleanUp(InetSocketAddress osd, String authString) throws IOException, JSONException, InterruptedException { + return sendRPC(osd, RPCTokens.cleanUpTOKEN, null, authString, null); + } + + /** + *

If a file was located by the cleanUpOperation this command + * deletes a file with the given fileID from the given OSD.

+ * + * @param osd + * @param authString + * @param fileID + * @return + * @throws IOException + * @throws JSONException + * @throws InterruptedException + */ + public RPCResponse cleanUpDelete(InetSocketAddress osd, String authString, String fileID) throws IOException, JSONException, InterruptedException { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XFILEID, fileID); + return sendRPC(osd, RPCTokens.deleteLocalTOKEN, null, authString, headers); + } +} diff --git a/servers/src/org/xtreemfs/common/clients/scrubber/AsyncScrubber.java b/servers/src/org/xtreemfs/common/clients/scrubber/AsyncScrubber.java new file mode 100644 index 0000000000000000000000000000000000000000..ce826075c366694eb4fecc4644c4b3fea6dec62b --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/scrubber/AsyncScrubber.java @@ -0,0 +1,547 @@ +package org.xtreemfs.common.clients.scrubber; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URL; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.clients.io.RandomAccessFile; +import org.xtreemfs.common.clients.mrc.MRCClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.OutputUtils; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.common.uuids.UUIDResolver; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.SSLOptions; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.utils.CLIParser; 
+import org.xtreemfs.utils.DefaultDirConfig; +import org.xtreemfs.utils.CLIParser.CliOption; + +public class AsyncScrubber { + + private static final int DEFAULT_NUM_CONS = 10; + + private static final int DEFAULT_NUM_FILES = 100; + + private static final String DEFAULT_DIR_CONFIG = "/etc/xos/xtreemfs/default_dir"; + + private static String authString; + + static { + try { + authString = NullAuthProvider.createAuthString("root", MRCClient + .generateStringList("root")); + } catch (JSONException e) { + e.printStackTrace(); + } + } + + private AtomicInteger returnCode; + + private long startTime; + + private long lastStatusPrint; + + private long lastBytes; + + private int filesRead; + + private int connectionsPerOSD; + + private boolean updateFileSize; + + private HashMap osds; + + private List currentFiles; + + private VolumeWalker volumeWalker; + + private MultiSpeedy speedy; + + private MRCClient mrcClient; + + private DIRClient dirClient; + + private InetSocketAddress mrcAddress; + + private Logger logger; + + public static String latestScrubAttr = "scrubber.latestscrub"; + + private Map osdBytesMap = new HashMap(); + + private SSLOptions sslOptions; + + /** + * @param sharedSpeedy + * @param dirAddress + * the address of the directory service + * @param mrcAddress + * the address of the mrc holding the volume + * @param volumeName + * @param updateFileSize + * true if the file size should be updated. 
+ * @throws JSONException + * thrown by createAuthString + * @throws IOException + * thrown when creating a new MRCClient + * @throws Exception + * thrown when creating a new VolumeWalker + */ + public AsyncScrubber(final MultiSpeedy sharedSpeedy, InetSocketAddress dirAddress, + InetSocketAddress mrcAddress, String volumeName, boolean updateFileSize, + int connectionsPerOSD, int noFilesToFetch, SSLOptions ssl) throws Exception { + this.connectionsPerOSD = connectionsPerOSD; + this.updateFileSize = updateFileSize; + this.speedy = sharedSpeedy; + this.mrcAddress = mrcAddress; + + returnCode = new AtomicInteger(0); + + assert(sharedSpeedy != null); + //dirClient = new DIRClient(sharedSpeedy, dirAddress); + TimeSync.initialize(dirClient, 100000, 50, authString); + + mrcClient = new MRCClient(sharedSpeedy); + //UUIDResolver.shutdown(); + //UUIDResolver.start(dirClient, 1000, 1000); + + volumeWalker = new VolumeWalker(volumeName, mrcAddress, noFilesToFetch, authString, ssl); + + currentFiles = Collections.synchronizedList(new ArrayList()); + osds = new HashMap(); + logger = new Logger(null); + + sslOptions = ssl; + } + + public void shutdown() { + speedy.shutdown(); + //dirClient.shutdown(); + mrcClient.shutdown(); + volumeWalker.shutdown(); + for (OSDWorkQueue que : osds.values()) + que.shutDown(); + + //UUIDResolver.shutdown(); + //TimeSync.getInstance().shutdown(); + } + + public void waitForShutdown() { + mrcClient.shutdown(); + } + + /** + * Called by Main thread. Starts the scrubbing. Adds Files to the osd work + * queues until all files in the volume has been scrubbed. + * + * @throws Exception + */ + public void start() throws Exception { + startTime = System.currentTimeMillis(); + + if (volumeWalker.hasNext()) { + fillOSDs(); + } + while (currentFiles.size() > 0 || volumeWalker.hasNext()) { + fillOSDs(); + } + logger.closeFileWriter(); + System.out.println("Done. 
Total time: " + (System.currentTimeMillis() - startTime) / 1000 + + " secs."); + } + + /** + * Called by Main thread. Prints the total number of files/bytes read and + * the speed in KB/s. For each osd: prints the average connection speed in + * KB/s and the number of idle connections. + */ + private void printStatus() { + long currentStatusPrint = System.currentTimeMillis(); + try { + long bytes = 0; + String msg = ""; + String osdDetails = "OSDs: "; + + for (OSDWorkQueue osd : osds.values()) { + + long osdBytes = osd.getTransferredBytes(); + + Long lastOSDBytes = osdBytesMap.get(osd); + if (lastOSDBytes == null) + lastOSDBytes = 0L; + + osdDetails += osd.getOSDId() + + ": " + + OutputUtils.formatBytes((osdBytes - lastOSDBytes) * 1000 + / (currentStatusPrint - lastStatusPrint)) + "/s, " + + osd.getNumberOfIdleConnections() + " idle; "; + bytes += osdBytes; + osdBytesMap.put(osd, osdBytes); + } + + msg += "#files scrubbed: " + + filesRead + + " (" + + OutputUtils.formatBytes(bytes) + + "), avrg. throughput: " + + OutputUtils.formatBytes((bytes - lastBytes) * 1000 + / (currentStatusPrint - lastStatusPrint)) + "/s, "; + + System.out.println(msg + osdDetails + "\u001b[100D\u001b[A"); + + lastStatusPrint = currentStatusPrint; + lastBytes = bytes; + + } catch (Exception e) { + e.printStackTrace(); + } + } + + /** + * Called by Main thread. Retrieves the next path from the VolumeWalker and + * creates a RandomAccessFile of the file specified by the path. If no work + * queue exists for an osd on which the file is stored a new queue is + * created. The file is added to the list currentFiles. 
+ * + * @throws Exception + * thrown by hasNext + */ + void addNextFileToCurrentFiles() throws Exception { + if (volumeWalker.hasNext()) { + String path = volumeWalker.removeNextFile(); + try { + RandomAccessFile file = new RandomAccessFile("r", mrcAddress, path, speedy, + authString); + for (ServiceUUID osdId : file.getOSDs()) { + // add new OSD to the scrubbing process + if (!osds.containsKey(osdId)) { + System.out.println("Adding OSD: " + osdId); + osds.put(osdId, new OSDWorkQueue(osdId, connectionsPerOSD,sslOptions)); + } + } + currentFiles.add(new ScrubbedFile(this, file)); + } catch (Exception e) { + e.printStackTrace(); + } + } + } + + /** + * Called by Main thread. Fills the osd work queues. If currentFiles does + * not contain enough request for an osd new files are added to + * currentFiles. + * + * @throws Exception + * thrown by addNextFileToCurrentFiles + */ + void fillOSDs() throws Exception { + if (System.currentTimeMillis() - lastStatusPrint > 1000) + printStatus(); + if (osds.isEmpty()) { + addNextFileToCurrentFiles(); + } + try { + for (OSDWorkQueue osd : osds.values()) { + fillQueue(osd); + if (osd.getNumberOfIdleConnections() > 0) { + for (int i = 0; i < 10; i++) { + if (volumeWalker.hasNext()) { + addNextFileToCurrentFiles(); + } else + break; + } + fillQueue(osd); + } + } + } catch (Exception e) { + e.printStackTrace(); + } + } + + /** + * Called by Main thread. Fills the osd work queue specified by the + * parameter osd. 
+ * + * @param osd + */ + void fillQueue(OSDWorkQueue osd) { + synchronized (currentFiles) { + for (ScrubbedFile file : currentFiles) { + while (true) { // get all possible reads for this osd + int objectNo = file.getRequestForOSD(osd); + if (objectNo == -1) {// no objects for this file + break; + } + if (!osd.readObjectAsync(file, objectNo)) + return; // OSD has currently no idle connections, + // proceed with next OSD + } + } + } + } + + /** + * @TODO setXattr last scrubber check setXattr(file, + * "xtreemfs-scrubber-lastcheck", now()) Called by MultiSpeedy or Main + * thread. Invoked when all objects of file has been successfully + * read. If result differs from the expected file size the + * inconsistency is logged and if updateFileSize is set to true, the + * filesize is updated. The file is removed from the list + * currentFiles. + * @param file + * @param result + * the number of bytes that has been read (the file size). + */ + void fileFinished(ScrubbedFile file, long result, boolean isUnreadable) { + // fileFinished can be called multiple times for a file when there are + // outstanding requests. Cannot use remove(file) here, since it could + // result in scrubber.shutdown() (when currentFiles.isEmpty) being + // called before all updates are finished. 
+ boolean firstCall = currentFiles.contains(file); + + if (!firstCall) // do not output messages twice + return; + + filesRead++; + + if (isUnreadable) { + returnCode.set(2); + logger.logError(file.getPath() + ": could not read from OSD, skipping file."); + } else if (!(result == file.getExpectedFileSize())) { + returnCode.compareAndSet(0, 1); + if (updateFileSize == true) { + try { + updateFileSize(file.getPath(), result); + logger.logError(file.getPath() + + ": file size in MRC is outdated, updated from " + + file.getExpectedFileSize() + " to " + result); + } catch (Exception e) { + e.printStackTrace(); + logger.logError(file.getPath() + ": Exception " + + "thrown while attempting to update file size"); + } + } else { + logger.logError(file.getPath() + " file size in MRC is outdated, was: " + + file.getExpectedFileSize() + ", found: " + result); + } + } + + try { + setLastScrubAttr(file.getPath()); + } catch (Exception e) { + e.printStackTrace(); + logger.logError(file.getPath() + ": Exception " + + "thrown while attempting set lastScrub attribute"); + } + // must be invoked after the updates have been made, because it sync. + // with the main thread. + currentFiles.remove(file); + } + + /** + * + * Called by MultiSpeedy or Main thread. Invoked when an object of file had + * invalid checksum. + * + * @param file + */ + + void foundInvalidChecksum(ScrubbedFile file, int objectNo) { + returnCode.set(2); + logger.logError(file.getPath() + ": object no. " + objectNo + " has invalid checksum."); + } + + /** + * Called by Multispeedy or Main thread. 
Updates the file size of the file + * specified by path to newFileSize + * + * @param path + * @param newFileSize + * @throws Exception + */ + public void updateFileSize(String path, long newFileSize) throws Exception { + Map open = mrcClient.open(mrcAddress, path, "t", authString); + String xcap = open.get(HTTPHeaders.HDR_XCAPABILITY); + Capability capability = new Capability(xcap); + String newFileSizeHeader = "[" + newFileSize + "," + capability.getEpochNo() + "]"; + mrcClient.updateFileSize(mrcAddress, xcap, newFileSizeHeader, authString); + } + + void setLastScrubAttr(String path) throws Exception { + Map newXAttr = new HashMap(); + long time = System.currentTimeMillis(); + newXAttr.put(latestScrubAttr, String.valueOf(time)); + mrcClient.setXAttrs(mrcAddress, path, newXAttr, authString); + volumeWalker.fileOrDirScrubbed(path, time); + } + + public void enableLogfile(String filename) { + logger = new Logger(filename); + } + + public int getReturnCode() { + return returnCode.get(); + } + + public static void main(String[] args) throws Exception { + + Logging.start(Logging.LEVEL_WARN); + + Map options = new HashMap(); + List arguments = new ArrayList(1); + options.put("h", new CliOption(CliOption.OPTIONTYPE.SWITCH)); + options.put("dir", new CliOption(CliOption.OPTIONTYPE.URL)); + options.put("chk", new CliOption(CliOption.OPTIONTYPE.SWITCH)); + options.put("cons", new CliOption(CliOption.OPTIONTYPE.NUMBER)); + options.put("files", new CliOption(CliOption.OPTIONTYPE.NUMBER)); + options.put("c", new CliOption(CliOption.OPTIONTYPE.STRING)); + options.put("cp", new CliOption(CliOption.OPTIONTYPE.STRING)); + options.put("t", new CliOption(CliOption.OPTIONTYPE.STRING)); + options.put("tp", new CliOption(CliOption.OPTIONTYPE.STRING)); + options.put("h", new CliOption(CliOption.OPTIONTYPE.SWITCH)); + + CLIParser.parseCLI(args, options, arguments); + + if (arguments.size() != 1 || options.get("h").switchValue != null) { + usage(); + return; + } + + InetSocketAddress 
dirAddr = null;
+        boolean useSSL = false;
+        String serviceCredsFile = null;
+        String serviceCredsPass = null;
+        String trustedCAsFile = null;
+        String trustedCAsPass = null;
+
+        URL dirURL = options.get("dir").urlValue;
+
+        // parse security info if protocol is 'https'
+        if (dirURL != null && "https".equals(dirURL.getProtocol())) {
+            useSSL = true;
+            serviceCredsFile = options.get("c").stringValue;
+            serviceCredsPass = options.get("cp").stringValue;
+            trustedCAsFile = options.get("t").stringValue;
+            trustedCAsPass = options.get("tp").stringValue;
+        }
+
+        // read default settings (no -dir given: fall back to the DIR config file)
+        if (dirURL == null) {
+
+            DefaultDirConfig cfg = new DefaultDirConfig(DEFAULT_DIR_CONFIG);
+            cfg.read();
+
+            dirAddr = cfg.getDirectoryService();
+            useSSL = cfg.isSslEnabled();
+            serviceCredsFile = cfg.getServiceCredsFile();
+            serviceCredsPass = cfg.getServiceCredsPassphrase();
+            trustedCAsFile = cfg.getTrustedCertsFile();
+            trustedCAsPass = cfg.getTrustedCertsPassphrase();
+        } else
+            dirAddr = new InetSocketAddress(dirURL.getHost(), dirURL.getPort());
+
+        boolean checkOnly = options.get("chk").switchValue != null;
+
+        int noConnectionsPerOSD = DEFAULT_NUM_CONS;
+        if (options.get("cons").numValue != null)
+            noConnectionsPerOSD = options.get("cons").numValue.intValue();
+
+        int noFilesToFetch = DEFAULT_NUM_FILES;
+        if (options.get("files").numValue != null)
+            noFilesToFetch = options.get("files").numValue.intValue();
+
+        // "uuid:<id>" selects lookup by UUID instead of by volume name
+        String volume = arguments.get(0);
+        boolean isVolUUID = false;
+        if (volume.startsWith("uuid:")) {
+            volume = volume.substring("uuid:".length());
+            isVolUUID = true;
+        }
+
+        SSLOptions sslOptions = useSSL ? new SSLOptions(serviceCredsFile, serviceCredsPass,
+            SSLOptions.PKCS12_CONTAINER,
+            trustedCAsFile, trustedCAsPass, SSLOptions.JKS_CONTAINER, false) : null;
+
+        // resolve volume MRC
+        Map query = RPCClient.generateMap(isVolUUID ?
"uuid" : "name", volume);
+        DIRClient dirClient = new DIRClient(dirAddr, sslOptions, RPCClient.DEFAULT_TIMEOUT);
+        TimeSync.initialize(dirClient, 100000, 50, authString);
+
+        RPCResponse>> resp = dirClient.getEntities(query, RPCClient
+            .generateStringList("mrc", "name"), authString);
+        Map> result = resp.get();
+        resp.freeBuffers();
+
+
+        if (result.isEmpty()) {
+            // NOTE(review): dirURL is null here when the address came from the
+            // default config file, so this message may print "null"
+            System.err.println("volume '" + arguments.get(0)
+                + "' could not be found at Directory Service '" + dirURL + "'");
+            System.exit(3);
+        }
+        Map volMap = result.values().iterator().next();
+        String mrc = (String) volMap.get("mrc");
+        volume = (String) volMap.get("name");
+
+        UUIDResolver.start(dirClient, 60*60, 10*60*60);
+
+        ServiceUUID mrcUUID = new ServiceUUID(mrc);
+        InetSocketAddress mrcAddress = mrcUUID.getAddress();
+
+        try {
+
+            MultiSpeedy speedy = new MultiSpeedy(sslOptions);
+            speedy.start();
+            AsyncScrubber scrubber = new AsyncScrubber(speedy, dirAddr, mrcAddress, volume,
+                !checkOnly, noConnectionsPerOSD, noFilesToFetch,sslOptions);
+
+            scrubber.start();
+            scrubber.shutdown();
+            System.exit(scrubber.getReturnCode());
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+
+        // NOTE(review): System.exit() above terminates the JVM on the success
+        // path, so this cleanup only runs when an exception was caught
+        TimeSync.close();
+        UUIDResolver.shutdown();
+        dirClient.shutdown();
+
+    }
+
+    private static void usage() {
+        System.out.println("usage: xtfs_scrub [options] | uuid:");
+        System.out.println(" -dir uri directory service to use (e.g.
'http://localhost:32638')"); + System.out + .println(" If no URI is specified, URI and security settings are taken from '" + + DEFAULT_DIR_CONFIG + "'"); + System.out + .println(" In case of a secured URI ('https://...'), it is necessary to also specify SSL credentials:"); + System.out + .println(" -c a PKCS#12 file containing user credentials"); + System.out + .println(" -cp a pass phrase to decrypt the the user credentials file"); + System.out + .println(" -t a PKCS#12 file containing a set of certificates from trusted CAs"); + System.out + .println(" -tp a pass phrase to decrypt the trusted CAs file"); + System.out + .println(" -chk check only (do not update file sizes on the MRC in case of inconsistencies)"); + System.out.println(" -cons n number of connections per OSD (default=" + DEFAULT_NUM_CONS + + ")"); + System.out.println(" -files n number of files to fetch at once from MRC (default=" + + DEFAULT_NUM_FILES + ")"); + System.out.println(" -h show usage info"); + } +} diff --git a/servers/src/org/xtreemfs/common/clients/scrubber/FileState.java b/servers/src/org/xtreemfs/common/clients/scrubber/FileState.java new file mode 100644 index 0000000000000000000000000000000000000000..99402b2dda0d5e8145ae6d0806ca79c5929bebd2 --- /dev/null +++ b/servers/src/org/xtreemfs/common/clients/scrubber/FileState.java @@ -0,0 +1,117 @@ +package org.xtreemfs.common.clients.scrubber; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + + + +public class FileState{ + + private enum ObjectState {TODO, READING, DONE;} + + private List objectStates; + private int NoOfObjectsEstimate = Integer.MAX_VALUE; + private long stripeSize; + private long fileSize = -1; + private boolean fileDone = false; + + /** + * + * @param stripeSize + * @param size - the file size stored in the meta data + */ + public FileState(long stripeSize, int size) { + this.stripeSize = stripeSize; + objectStates = Collections.synchronizedList( + new ArrayList(size)); + for(int i 
= 0; i < size; i++)
+            objectStates.add(ObjectState.TODO);
+    }
+
+    /** @return true once EOF was observed and all objects up to it are DONE */
+    public boolean isFileDone() { return fileDone; }
+
+    /**
+     *
+     * @return returns the file size read if EOF has been read, otherwise -1 is returned.
+     */
+    public long getFileSize() {
+        // only reached when the file ended exactly on an object boundary:
+        // in that case NoOfObjectsEstimate is the index of the first object
+        // past EOF, i.e. the number of full objects read
+        if(fileDone && fileSize == -1)
+            fileSize = NoOfObjectsEstimate * stripeSize;
+        return fileSize;
+    }
+
+    /**
+     * Called by Multispeedy or Main thread.
+     * Changes the state of the object specified by the parameter objectNo to
+     * DONE. If the file is not marked as unreadable and EOF has been read and
+     * all objects are DONE, the file is marked as done.
+     *
+     * @param objectNo index of the object that was read
+     * @param bytesRead number of bytes actually found in that object
+     */
+    public void incorporateReadResult(int objectNo, long bytesRead) {
+        assert objectStates.get(objectNo).equals(ObjectState.READING);
+        objectStates.set(objectNo, ObjectState.DONE);
+        if(bytesRead > 0) { // some data read
+            assert NoOfObjectsEstimate >= objectNo;
+            if(bytesRead != stripeSize) {
+                // partial object => this is the last object; size is now exact
+                NoOfObjectsEstimate = objectNo;
+                fileSize = objectNo * stripeSize + bytesRead;
+            }
+        }
+        else { // read of object after EOF (zero bytes)
+            NoOfObjectsEstimate = Math.min(NoOfObjectsEstimate,objectNo);
+        }
+
+        // check if file is finished and update flag
+        if(NoOfObjectsEstimate != Integer.MAX_VALUE){
+            fileDone = true;
+            for(int i = 0; i <= NoOfObjectsEstimate; i++){
+                if(!objectStates.get(i).equals(ObjectState.DONE)){
+                    fileDone = false;
+                    break;
+                }
+            }
+        }
+        // if the object is the last object and the file is not
+        // done, the file was longer than expected, and another object
+        // is added to the object states.
+        else if((objectStates.size()-1 == objectNo)){
+            addObject();
+        }
+    }
+
+    /**
+     * Called by Multispeedy or Main thread.
+     * Sets the object state to READING
+     * @param objectNo
+     */
+    public void markObjectAsInFlight(int objectNo){
+        objectStates.set(objectNo, ObjectState.READING);
+    }
+
+    /**
+     *
+     * @param objectNo
+     * @return returns true if the object state of the object specified by
+     * objectNo is TODO, returns false otherwise.
+ */
+    public boolean isTodo(int objectNo) {
+        return objectStates.get(objectNo).equals(ObjectState.TODO);
+    }
+
+
+    // appends one more TODO entry when the file is longer than expected
+    private void addObject() {
+        objectStates.add(ObjectState.TODO);
+    }
+
+    public void setObjectState(int objectNo, ObjectState state){
+        objectStates.set(objectNo, state);
+    }
+
+    // NOTE(review): hands out the internal (synchronized) list itself, so
+    // callers can modify this file's state directly
+    public List getObjectStates(){
+        return objectStates;
+    }
+
+    public int getNoOfObjectStates(){
+        return objectStates.size();
+    }
+}
diff --git a/servers/src/org/xtreemfs/common/clients/scrubber/Logger.java b/servers/src/org/xtreemfs/common/clients/scrubber/Logger.java
new file mode 100644
index 0000000000000000000000000000000000000000..a0512b4a86729030fd70b980b9b1e7d2b9bbe0f2
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/scrubber/Logger.java
@@ -0,0 +1,50 @@
+package org.xtreemfs.common.clients.scrubber;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Date;
+
+/**
+ * Scrubber logger: prints messages to stderr and, if a log file name was
+ * given, additionally writes them to that file.
+ */
+public class Logger extends PrintStream {
+
+    // null if no log file was configured or it could not be created
+    FileWriter writer;
+
+    public Logger(String logFileName) {
+        super(System.err);
+
+        if(logFileName != null) {
+            File logFile = new File(logFileName);
+            Date date = new Date();
+            try {
+                writer = new FileWriter(logFile);
+                writer.write("Date: " + date.toString() + "\n");
+            } catch (IOException e) {
+                // fall back to stderr-only logging
+                System.err.println("Could not create log file.");
+                e.printStackTrace();
+            }
+        }
+    }
+
+    /** Writes the message to stderr and to the log file (if enabled). */
+    public void logError(String message) {
+        super.println(message);
+
+        try {
+            if(writer != null)
+                writer.write(message + "\n");
+        } catch (IOException e) {
+            System.err.println("Could not write to log!");
+            e.printStackTrace();
+        }
+    }
+
+    // NOTE(review): the FileWriter is never flushed per message, so log
+    // content is only guaranteed on disk after this has been called
+    public void closeFileWriter(){
+        try {
+            if(writer != null)
+                writer.close();
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/servers/src/org/xtreemfs/common/clients/scrubber/OSDWorkQueue.java b/servers/src/org/xtreemfs/common/clients/scrubber/OSDWorkQueue.java
new
file mode 100644
index 0000000000000000000000000000000000000000..5eaca3af3d669bdffb0ad4061caf92699a46c22a
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/scrubber/OSDWorkQueue.java
@@ -0,0 +1,148 @@
+package org.xtreemfs.common.clients.scrubber;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.xtreemfs.common.TimeSync;
+import org.xtreemfs.common.buffer.ReusableBuffer;
+import org.xtreemfs.common.clients.RPCResponse;
+import org.xtreemfs.common.clients.RPCResponseListener;
+import org.xtreemfs.common.clients.osd.OSDClient;
+import org.xtreemfs.common.uuids.ServiceUUID;
+import org.xtreemfs.foundation.pinky.HTTPHeaders;
+import org.xtreemfs.foundation.pinky.SSLOptions;
+import org.xtreemfs.foundation.speedy.MultiSpeedy;
+
+/**
+ * The work queue for an OSD.
+ *
+ * Asynchronously sends off checkObject requests with one of its MultiSpeedies.
+ * Dispatches "finished" callback to respective file.
+ */
+public class OSDWorkQueue implements RPCResponseListener {
+
+    // total number of bytes found in objects checked via this OSD
+    private AtomicLong transferredBytes = new AtomicLong();
+
+    private final ServiceUUID id;
+
+    // one MultiSpeedy/OSDClient pair per parallel connection
+    private MultiSpeedy[] connections;
+
+    private OSDClient[] clients;
+
+    // isIdle[i] == true <=> clients[i] currently has no request in flight
+    private boolean[] isIdle;
+
+    private AtomicInteger noOfIdleConnections;
+
+    private int noOfConnections;
+
+    /**
+     * Creates the queue and starts noConnections connections to the OSD.
+     *
+     * @param id UUID of the OSD this queue talks to
+     * @param noConnections number of parallel connections
+     * @param ssl SSL options, or null for plain connections
+     * @throws IOException if a connection cannot be created or started
+     */
+    public OSDWorkQueue(ServiceUUID id, int noConnections, SSLOptions ssl) throws IOException {
+        this.id = id;
+        connections = new MultiSpeedy[noConnections];
+        clients = new OSDClient[noConnections];
+        isIdle = new boolean[noConnections];
+        noOfIdleConnections = new AtomicInteger(0);
+        this.noOfConnections = noConnections;
+
+        for (int i = 0; i < noConnections; i++) {
+            MultiSpeedy speedy = null;
+            if (ssl == null)
+                speedy = new MultiSpeedy();
+            else
+                speedy = new MultiSpeedy(ssl);
+            speedy.start();
+            connections[i] = speedy;
+            clients[i] = new OSDClient(speedy);
+            isIdle[i] = true;
+        }
+        noOfIdleConnections.set(noConnections);
+    }
+
+    ServiceUUID getOSDId() {
+        return id;
+    }
+
+    int getNumberOfIdleConnections() {
+        return noOfIdleConnections.get();
+    }
+
+    int getTotalNumberOfConnections() {
+        return noOfConnections;
+    }
+
+    /**
+     * @return the total amount of bytes transferred by the OSD
+     */
+    public long getTransferredBytes() {
+        return transferredBytes.get();
+    }
+
+    /**
+     * Called by the Main thread Reads an object given by the parameters file
+     * and objectNo asynchronously, if the file not marked as unreadable and an
+     * idle connection exists.
+     *
+     * @return returns false if there is no idle connection.
+     */
+    public boolean readObjectAsync(ScrubbedFile file, int objectNo) {
+        for (int i = 0; i < clients.length; i++) {
+            if (isIdle[i]) {
+                // submitrequest
+                isIdle[i] = false;
+                noOfIdleConnections.decrementAndGet();
+                file.markObjectAsInFlight(objectNo);
+                file.readObjectAsync(clients[i], this, new ReadObjectContext(i, file, objectNo,
+                    TimeSync.getLocalSystemTime()), objectNo);
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Called by MultiSpeedy or Main thread.
+     * Callback for a finished checkObject request: frees the connection and
+     * forwards the result (byte count, checksum error, or failure) to the file.
+     */
+    public void responseAvailable(RPCResponse response) {
+        ReadObjectContext context = (ReadObjectContext) response.getAttachment();
+
+        // unsynchronized access to shared variable!
+        // ok here as only connection might be unused for another round
+        isIdle[context.connectionNo] = true;
+        noOfIdleConnections.incrementAndGet(); // atomic!
+        try {
+            if (response.getStatusCode() == 200) {// no error occurred
+                ReusableBuffer data = response.getBody();
+                if (data != null) {// read was successful
+                    data.flip();
+                    // NOTE(review): the body is parsed as a decimal byte count;
+                    // assumes array() holds exactly the response bytes - confirm
+                    // ReusableBuffer semantics
+                    String tmp = new String(data.array());
+                    long bytesInObject = Long.valueOf(tmp);
+
+                    transferredBytes.addAndGet(bytesInObject);
+                    context.file.objectHasBeenRead(bytesInObject, context.objectNo);
+                } else
+                    context.file.objectHasBeenRead(0, context.objectNo);
+
+                String header = response.getHeaders().getHeader(HTTPHeaders.HDR_XINVALIDCHECKSUM);
+                if (header != null && header.equalsIgnoreCase("true"))
+                    context.file.objectHasInvalidChecksum(context.objectNo);
+                // throw new IOException("object " + context.objectNo +
+                // " has an invalid checksum");
+                // TODO: dont throw, but call method as in objectHasNotBeenRead
+            } else {
+                context.file.couldNotReadObject(context.objectNo);
+            }
+
+        } catch (Exception e) {
+            context.file.couldNotReadObject(context.objectNo);
+        } finally {
+            response.freeBuffers();
+        }
+    }
+
+    void shutDown() {
+        for (int i = 0; i < connections.length; i++) {
+            connections[i].shutdown();
+        }
+    }
+}
\ No newline at end of file
diff --git
a/servers/src/org/xtreemfs/common/clients/scrubber/ReadObjectContext.java b/servers/src/org/xtreemfs/common/clients/scrubber/ReadObjectContext.java
new file mode 100644
index 0000000000000000000000000000000000000000..9a4aaf4564af8cde1060c7212e7809f727a0db94
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/scrubber/ReadObjectContext.java
@@ -0,0 +1,20 @@
+package org.xtreemfs.common.clients.scrubber;
+
+/**
+ * Holds information to identify an object and the connection used for
+ * the read request after receiving a response from the osd.
+ */
+public class ReadObjectContext {
+    // local system time at which the request was issued
+    public long readStart;
+    // index of the OSDWorkQueue connection the request was sent on
+    public int connectionNo;
+    public int objectNo;
+    public ScrubbedFile file;
+
+    ReadObjectContext(int connectionNo, ScrubbedFile file, int objectNo,
+        long readStart) {
+        this.readStart = readStart;
+        this.connectionNo = connectionNo;
+        this.file = file;
+        this.objectNo = objectNo;
+    }
+}
\ No newline at end of file
diff --git a/servers/src/org/xtreemfs/common/clients/scrubber/ScrubbedFile.java b/servers/src/org/xtreemfs/common/clients/scrubber/ScrubbedFile.java
new file mode 100644
index 0000000000000000000000000000000000000000..2e2044ae8ce7145c9047cad821a4ac984a3c9f75
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/scrubber/ScrubbedFile.java
@@ -0,0 +1,125 @@
+package org.xtreemfs.common.clients.scrubber;
+
+import java.net.InetSocketAddress;
+import org.xtreemfs.common.clients.RPCResponse;
+import org.xtreemfs.common.clients.RPCResponseListener;
+import org.xtreemfs.common.clients.io.RandomAccessFile;
+import org.xtreemfs.common.clients.osd.OSDClient;
+import org.xtreemfs.common.uuids.ServiceUUID;
+
+/**
+ * A single file under scrub: connects the scrubber, the per-object read
+ * state (FileState) and the OSD work queues.
+ */
+public class ScrubbedFile {
+    private AsyncScrubber scrubber;
+    private RandomAccessFile file;
+    // file size according to the MRC metadata at open time
+    private long expectedFileSize;
+    private FileState fileState;
+    // set once a read failed; no further requests are issued for this file
+    private boolean stopIssuingRequests = false;
+
+    public ScrubbedFile(AsyncScrubber scrubber, RandomAccessFile file)
+        throws Exception {
+        this.scrubber = scrubber;
+
this.file = file;
+        this.expectedFileSize = file.length();
+        // one extra object to cover a trailing partial stripe
+        int expectedNoOfObject =
+            (int) (expectedFileSize / file.getStripeSize()) + 1;
+        fileState = new FileState(file.getStripeSize(), expectedNoOfObject);
+    }
+
+    /**
+     * Called by Main thread.
+     * @return returns the next object stored on the osd specified by the
+     * parameter osd which has not been read or -1 if the file is marked as
+     * unreadable or has no unread objects stored on the osd.
+     */
+    public int getRequestForOSD(OSDWorkQueue osd) {
+        // check if osd is in StripingPolicy list of osds for this file
+        if(stopIssuingRequests || !file.getOSDs().contains(osd.getOSDId()))
+            return -1;
+
+        // find next object which has not been read for the osd
+        for(int i = 0; i < fileState.getNoOfObjectStates(); i++){
+            if (fileState.isTodo(i) && file.getOSDId(i).equals(osd.getOSDId())){
+                return i;
+            }
+        }
+        return -1;
+    }
+
+    /**
+     * Called by Multispeedy or Main thread.
+     * Sets the object state to READING
+     */
+    public void markObjectAsInFlight(int objectNo){
+        fileState.markObjectAsInFlight(objectNo);
+    }
+
+    /**
+     * Called by Multispeedy or Main thread.
+     * Is only invoked after successfully reading the object
+     * @param bytesInObject the number of bytes read
+     * @param objectNo the object which has been read
+     */
+    public void objectHasBeenRead(long bytesInObject, int objectNo) {
+        fileState.incorporateReadResult(objectNo, bytesInObject);
+        // once all objects are done, report the size that was actually read
+        if(fileState.isFileDone())
+            scrubber.fileFinished(this, fileState.getFileSize(),false);
+    }
+
+    /**
+     * Called by Multispeedy or Main thread.
+     * Marks the file as unreadable and removes the file from the scrubbers
+     * currentFiles list.
+ * @param objectNo
+     */
+    public void couldNotReadObject(int objectNo) {
+        // give up on this file entirely and report it as unreadable
+        stopIssuingRequests = true;
+        scrubber.fileFinished(this, fileState.getFileSize(),true);
+    }
+
+    /***
+     * Notifies the scrubber that an object of this file has an invalid
+     * checksum.
+     * @param objectNo the object with the invalid checksum
+     */
+
+    public void objectHasInvalidChecksum(int objectNo) {
+        scrubber.foundInvalidChecksum(this,objectNo);
+    }
+
+
+    /**
+     * @TODO logging... is it necessary here, or is it already done in responseAvailable?
+     * Called by Main thread.
+     * Sends a checkObject request to the osd holding the object specified by objectNo.
+     * @param osdClient client used to send the request
+     * @param listener callback to invoke when the response arrives
+     * @param context identifies object, file and connection for the callback
+     * @param objectNo the object to check
+     * @return the pending RPC response, or null if sending failed
+     */
+    public RPCResponse readObjectAsync(OSDClient osdClient,
+            RPCResponseListener listener,
+            ReadObjectContext context,
+            int objectNo) {
+        RPCResponse response = null;
+        try {
+            ServiceUUID osd = file.getOSDId(objectNo);
+            InetSocketAddress current_osd_address = osd.getAddress();
+            response = osdClient.checkObject(current_osd_address, file.getLocations(),
+                file.getCapability(), file.getFileId(), objectNo);
+            response.setAttachment(context);
+            response.setResponseListener(listener);
+        }catch(Exception e){
+            e.printStackTrace();
+            // log "Exception thrown while attempting to read object no. ... of file ...
"
+        }
+        return response;
+    }
+
+    public String getPath(){
+        return file.getPath();
+    }
+
+    public long getExpectedFileSize(){
+        return expectedFileSize;
+    }
+}
\ No newline at end of file
diff --git a/servers/src/org/xtreemfs/common/clients/scrubber/VolumeWalker.java b/servers/src/org/xtreemfs/common/clients/scrubber/VolumeWalker.java
new file mode 100644
index 0000000000000000000000000000000000000000..845447ba16dfdd33143d00b5d1d8c756d97d8145
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/scrubber/VolumeWalker.java
@@ -0,0 +1,125 @@
+/**
+ *
+ */
+package org.xtreemfs.common.clients.scrubber;
+
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Map;
+
+import org.xtreemfs.common.clients.mrc.MRCClient;
+import org.xtreemfs.foundation.pinky.SSLOptions;
+
+/**
+ * Walks a volume's directory tree via the MRC, handing out files to scrub
+ * and recording "last scrubbed" timestamps on fully processed directories.
+ */
+public class VolumeWalker {
+
+    private InetSocketAddress mrcAddress;
+    private MRCClient mrcClient;
+    private String authString;
+    // files fetched from the MRC but not yet handed out
+    private LinkedList files;
+    // directories still to be traversed (depth first)
+    private LinkedList dirs;
+    // number of not-yet-scrubbed entries per directory
+    public HashMap elementsInDir;
+    private int noFilesToFetch;
+
+    /**
+     * @param volumeName name of the volume to walk
+     * @param mrcAddress address of the MRC hosting the volume
+     * @param noFilesToFetch how many files to prefetch per MRC round trip
+     * @param authString authentication string for MRC requests
+     * @param ssl SSL options, or null for plain connections
+     */
+    public VolumeWalker(String volumeName, InetSocketAddress mrcAddress,
+        int noFilesToFetch, String authString, SSLOptions ssl) throws Exception{
+        this.mrcAddress = mrcAddress;
+        if (ssl != null) {
+            mrcClient = new MRCClient(MRCClient.DEFAULT_TIMEOUT, ssl);
+        } else {
+            mrcClient = new MRCClient();
+        }
+        this.noFilesToFetch = noFilesToFetch;
+        this.authString = authString;
+        dirs = new LinkedList();
+        dirs.add(volumeName);
+        files = new LinkedList();
+        elementsInDir = new HashMap();
+    }
+/**
+ * Adds files and directories from the volume to the lists files and dirs.
+ * The directories are traversed using depth first search.
+ * @throws Exception thrown by readDirAndStat + */ + private void getMoreFiles() throws Exception { + while(!dirs.isEmpty() && files.size() < noFilesToFetch){ + String dir = dirs.removeFirst(); + Map> dirsAndFiles = + mrcClient.readDirAndStat(mrcAddress, dir, authString); + if(dirsAndFiles.isEmpty()){ + long latestScrub = System.currentTimeMillis(); + setLatestScrubOfDir(dir, latestScrub); + fileOrDirScrubbed(dir, latestScrub); + } + else + elementsInDir.put(dir, dirsAndFiles.size()); + for(String path : dirsAndFiles.keySet()){ + String type = dirsAndFiles.get(path).get("objType").toString(); + //if file + if(type.equals("1")){ + files.add(dir + "/" + path); + } + //if directory + if(type.equals("2")){ + dirs.add(dir + "/" + path); + } + } + } + } + + /** + * + * @return + * @throws Exception thrown by getMoreFiles. + */ + public boolean hasNext() throws Exception { + + if(!files.isEmpty()) + return true; + else if(dirs.isEmpty()) + return false; + else{ + getMoreFiles(); + return hasNext(); + } + } + + public String removeNextFile(){ + return files.removeLast(); + } + + public void setLatestScrubOfDir(String path, long time) throws Exception { + Map newXAttr = new HashMap(); + newXAttr.put(AsyncScrubber.latestScrubAttr, time); + mrcClient.setXAttrs(mrcAddress, path, newXAttr, authString); + } + /** + * @ TODO: currently sets a directories xattr to the largest time of its + * entries. should use the minimum though. 
+ */
+    public void fileOrDirScrubbed(String path, long time) throws Exception {
+        String dir = getParentDir(path);
+        if(dir != null){
+            // one fewer unscrubbed entry in the parent directory
+            int noOfUnscrubbedElements = elementsInDir.get(dir)-1;
+            elementsInDir.put(dir, noOfUnscrubbedElements);
+            if(noOfUnscrubbedElements == 0){
+                // parent fully scrubbed: mark it and propagate upwards
+                setLatestScrubOfDir(dir, time);
+                fileOrDirScrubbed(dir, time);
+            }
+        }
+
+    }
+
+    /** @return the parent directory of path, or null if path contains no '/' */
+    public String getParentDir(String path) {
+        int lastIndex = path.lastIndexOf('/');
+        if(lastIndex != -1)
+            return path.substring(0, lastIndex);
+        else
+            return null;
+    }
+
+    /** Shuts down the MRC client and waits for its termination. */
+    public void shutdown() {
+        mrcClient.shutdown();
+        mrcClient.waitForShutdown();
+    }
+
+}
\ No newline at end of file
diff --git a/servers/src/org/xtreemfs/common/config/Config.java b/servers/src/org/xtreemfs/common/config/Config.java
new file mode 100644
index 0000000000000000000000000000000000000000..57e95b57b721f52008971a470a2b001715de8086
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/config/Config.java
@@ -0,0 +1,118 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+*/ +/* + * AUTHORS: Christian Lorenz (ZIB), Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.common.config; + +import java.io.FileInputStream; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.Properties; + +/** + * + * @author bjko + */ +abstract public class Config { + + protected final Properties props; + + public Config() { + props = new Properties(); + } + + public Config(Properties prop) { + this.props = new Properties(prop); + } + + /** Creates a new instance of Config */ + public Config(String filename) throws IOException { + props = new Properties(); + props.load(new FileInputStream(filename)); + } + + protected int readRequiredInt(String paramName) { + String tmp = props.getProperty(paramName); + if (tmp == null) + throw new RuntimeException("property '" + paramName + + "' is required but was not found"); + try { + return Integer.parseInt(tmp.trim()); + } catch (NumberFormatException ex) { + throw new RuntimeException("property '" + paramName + + "' is an integer but '" + tmp + "' is not a valid number"); + } + } + + protected String readRequiredString(String paramName) { + String tmp = props.getProperty(paramName); + if (tmp == null) + throw new RuntimeException("property '" + paramName + + "' is required but was not found"); + return tmp.trim(); + } + + protected InetSocketAddress readRequiredInetAddr(String hostParam, + String portParam) { + String host = readRequiredString(hostParam); + int port = readRequiredInt(portParam); + InetSocketAddress isa = new InetSocketAddress(host, port); + return isa; + } + + protected boolean readRequiredBoolean(String paramName) { + String tmp = props.getProperty(paramName); + if (tmp == null) + throw new RuntimeException("property '" + paramName + + "' is required but was not found"); + return Boolean.parseBoolean(tmp.trim()); + } + + protected boolean readOptionalBoolean(String paramName, boolean 
defaultValue) { + String tmp = props.getProperty(paramName); + if (tmp == null) + return defaultValue; + else + return Boolean.parseBoolean(tmp.trim()); + } + + protected InetAddress readOptionalInetAddr(String paramName, + InetAddress defaultValue) throws UnknownHostException { + String tmp = props.getProperty(paramName); + if (tmp == null) + return defaultValue; + else + return InetAddress.getByName(tmp); + } + + protected String readOptionalString(String paramName, String defaultValue) { + return props.getProperty(paramName, defaultValue); + } + + public Properties getProps() { + return props; + } + +} diff --git a/servers/src/org/xtreemfs/common/config/ServiceConfig.java b/servers/src/org/xtreemfs/common/config/ServiceConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..1828ef4b28e4e20af05a3592a61c68fa0eb9a89e --- /dev/null +++ b/servers/src/org/xtreemfs/common/config/ServiceConfig.java @@ -0,0 +1,143 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Christian Lorenz (ZIB), Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.common.config; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.Properties; + +public class ServiceConfig extends Config { + + protected int debugLevel; + + protected int port; + + protected InetAddress address; + + protected boolean useSSL; + + protected String serviceCredsFile; + + protected String serviceCredsPassphrase; + + protected String serviceCredsContainer; + + protected String trustedCertsFile; + + protected String trustedCertsPassphrase; + + protected String trustedCertsContainer; + + private String geoCoordinates; + + public ServiceConfig() { + super(); + } + + public ServiceConfig(Properties prop) { + super(prop); + } + + public ServiceConfig(String filename) throws IOException { + super(filename); + } + + public void read() throws IOException { + + this.debugLevel = this.readRequiredInt("debug_level"); + + this.port = this.readRequiredInt("listen.port"); + + this.address = this.readOptionalInetAddr("listen.address", null); + + if(this.useSSL = this.readRequiredBoolean("ssl.enabled")){ + this.serviceCredsFile = this.readRequiredString("ssl.service_creds"); + + this.serviceCredsPassphrase = this.readRequiredString("ssl.service_creds.pw"); + + this.serviceCredsContainer = this.readRequiredString("ssl.service_creds.container"); + + this.trustedCertsFile = this.readRequiredString("ssl.trusted_certs"); + + this.trustedCertsPassphrase = this.readRequiredString("ssl.trusted_certs.pw"); + + this.trustedCertsContainer = this.readRequiredString("ssl.trusted_certs.container"); + } + + this.geoCoordinates = this.readOptionalString("geographic_coordinates", ""); + + + + } + + public int getDebugLevel() { + return this.debugLevel; + } + + public int getPort() { + return this.port; + } + + public InetAddress getAddress() { + return this.address; + } + + public boolean isUsingSSL() { + return this.useSSL; + } + + public String 
getServiceCredsContainer() { + return this.serviceCredsContainer; + } + + public String getServiceCredsFile() { + return this.serviceCredsFile; + } + + public String getServiceCredsPassphrase() { + return this.serviceCredsPassphrase; + } + + public String getTrustedCertsContainer() { + return this.trustedCertsContainer; + } + + public String getTrustedCertsFile() { + return this.trustedCertsFile; + } + + public String getTrustedCertsPassphrase() { + return this.trustedCertsPassphrase; + } + + public String getGeoCoordinates() { + return geoCoordinates; + } + + public void setGeoCoordinates(String geoCoordinates) { + this.geoCoordinates = geoCoordinates; + } + +} diff --git a/servers/src/org/xtreemfs/common/logging/Logging.java b/servers/src/org/xtreemfs/common/logging/Logging.java new file mode 100644 index 0000000000000000000000000000000000000000..5fe78ee406070ca7b124bb0c086079f8d9f21355 --- /dev/null +++ b/servers/src/org/xtreemfs/common/logging/Logging.java @@ -0,0 +1,191 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.common.logging; + +/** + * + * @author bjko + */ +public class Logging { + + protected static final char ABBREV_LEVEL_INFO = 'I'; + + protected static final char ABBREV_LEVEL_DEBUG = 'D'; + + protected static final char ABBREV_LEVEL_WARN = 'W'; + + protected static final char ABBREV_LEVEL_ERROR = 'E'; + + protected static final char ABBREV_LEVEL_TRACE = 'T'; + + public static final int LEVEL_ERROR = 0; + + public static final int LEVEL_WARN = 1; + + public static final int LEVEL_INFO = 2; + + public static final int LEVEL_DEBUG = 3; + + public static final int LEVEL_TRACE = 10; + + public static final String FORMAT_PATTERN = "[ %c | %-20s | %-15s | %3d | %9s] %s"; + + protected static Logging instance; + + protected static boolean tracingEnabled = false; + + private final int level; + + private long startTime; + + /** + * Creates a new instance of Logging + */ + private Logging(int level) { + + if (level < 0) + this.level = 0; + else + this.level = level; + + instance = this; + + if (this.level >= LEVEL_TRACE) + tracingEnabled = true; + + startTime = System.currentTimeMillis(); + } + + public static void logMessage(int level, Object me, String msg) { + if (level <= instance.level) { + char levelName = getLevelName(level); + if (me == null) { + System.out.println(String.format(FORMAT_PATTERN, levelName, "-", Thread + .currentThread().getName(), Thread.currentThread().getId(), getTimeStamp(), + msg)); + } else { + System.out.println(String.format(FORMAT_PATTERN, levelName, me.getClass() + .getSimpleName(), Thread.currentThread().getName(), Thread.currentThread() + .getId(), getTimeStamp(), msg)); + } + } + } + + public static void logMessage(int level, Object me, Throwable msg) { + if (level <= instance.level) { + char levelName = getLevelName(level); + if (me == null) { + System.out.println(String.format(FORMAT_PATTERN, levelName, "-", Thread + .currentThread().getName(), 
Thread.currentThread().getId(), getTimeStamp(), + msg.toString())); + for (StackTraceElement elem : msg.getStackTrace()) { + System.out.println(" ... " + + elem.toString()); + } + if (msg.getCause() != null) { + System.out.println(String.format(FORMAT_PATTERN, levelName, "-", Thread + .currentThread().getName(), Thread.currentThread().getId(), + getTimeStamp(), "root cause: " + msg.getCause())); + for (StackTraceElement elem : msg.getCause().getStackTrace()) { + System.out.println(" ... " + + elem.toString()); + } + } + } else { + System.out.println(String.format(FORMAT_PATTERN, levelName, me.getClass() + .getSimpleName(), Thread.currentThread().getName(), Thread.currentThread() + .getId(), getTimeStamp(), msg)); + for (StackTraceElement elem : msg.getStackTrace()) { + System.out.println(" ... " + + elem.toString()); + } + if (msg.getCause() != null) { + System.out.println(String.format(FORMAT_PATTERN, levelName, me.getClass(), + Thread.currentThread().getName(), Thread.currentThread().getId(), + getTimeStamp(), "root cause: " + msg.getCause())); + for (StackTraceElement elem : msg.getCause().getStackTrace()) { + System.out.println(" ... 
" + + elem.toString()); + } + } + } + } + } + + public static char getLevelName(int level) { + switch (level) { + case LEVEL_ERROR: + return ABBREV_LEVEL_ERROR; + case LEVEL_INFO: + return ABBREV_LEVEL_INFO; + case LEVEL_WARN: + return ABBREV_LEVEL_WARN; + case LEVEL_DEBUG: + return ABBREV_LEVEL_DEBUG; + case LEVEL_TRACE: + return ABBREV_LEVEL_TRACE; + default: + return '?'; + } + } + + public synchronized static void start(int level) { + if (instance == null) { + instance = new Logging(level); + } + } + + /*public static void setLevel(int level) { + if (instance != null) + instance.level = level; + }*/ + + public static boolean isDebug() { + if (instance == null) + return false; + else + return instance.level >= LEVEL_DEBUG; + } + + public static boolean isInfo() { + if (instance == null) + return false; + else + return instance.level >= LEVEL_INFO; + } + + public static boolean tracingEnabled() { + return tracingEnabled; + } + + private static String getTimeStamp() { + long seconds = (System.currentTimeMillis() - instance.startTime) / 1000; + long hours = seconds / 3600; + long mins = (seconds % 3600) / 60; + long secs = seconds % 60; + return hours + ":" + (mins < 10 ? "0" : "") + mins + ":" + (secs < 10 ? "0" : "") + secs; + } + +} diff --git a/servers/src/org/xtreemfs/common/logging/Utils.java b/servers/src/org/xtreemfs/common/logging/Utils.java new file mode 100644 index 0000000000000000000000000000000000000000..84084553a9703ce5ad2104f0c59dd19c26b1c6df --- /dev/null +++ b/servers/src/org/xtreemfs/common/logging/Utils.java @@ -0,0 +1,55 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.common.logging; + +/** + * + * @author bjko + */ +public class Utils { + + public static final char LEVEL_INFO = 'I'; + public static final char LEVEL_DEBUG = 'D'; + public static final char LEVEL_WARN = 'W'; + public static final char LEVEL_ERROR = 'E'; + + + public static void logMessage(char level, Object me, String msg) { + if (me == null) { + System.out.println(String.format("[ %c | %-20s | %3d ] %s", + level,"?",Thread.currentThread().getId(), + msg)); + } else { + System.out.println(String.format("[ %c | %-20s | %3d ] %s", + level,me.getClass().getSimpleName(),Thread.currentThread().getId(), + msg)); + } + } + + /** Creates a new instance of Utils */ + public Utils() { + } + +} diff --git a/servers/src/org/xtreemfs/common/striping/Location.java b/servers/src/org/xtreemfs/common/striping/Location.java new file mode 100644 index 0000000000000000000000000000000000000000..a0627c03d99f16766645b1724bdfb65716869588 --- /dev/null +++ b/servers/src/org/xtreemfs/common/striping/Location.java @@ -0,0 +1,305 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. 
+ The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB), Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC) + */ + +package org.xtreemfs.common.striping; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.common.uuids.UnknownUUIDException; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; + +/** + * It models the locations of one replica. 
Every instance of this class will be + * an object oriented representation of one replica + * + * @author clorenz + */ +public class Location { + /** + * update policies + */ + public static final String REPLICA_UPDATE_SYNC = "sync"; + public static final String REPLICA_UPDATE_ONDEMAND = "lazy"; + + /** + * used update policy for this replica + */ + private String replicaUpdatePolicy; + + /** + * striping policy which is used for this replica + */ + private StripingPolicy policy; + /** + * involved osds + */ + private List osdList; + + /** + * It creates a new instance of Location + * + * @param sp + * Striping policy of the replica + * @param osds + * OSDs containing the pieces of the same replica + */ + public Location(StripingPolicy sp, List osds) { + if ((sp != null) && (osds != null)) { + if (sp.getWidth() == osds.size()) { + policy = sp; + osdList = osds; + } else + throw new IllegalArgumentException( + "The striping policy is for " + sp.getWidth() + + " OSDs but the list of OSDs contains " + + osds.size()); + } else if (sp == null) + throw new IllegalArgumentException("The policy is null"); + else + throw new IllegalArgumentException("The osdList is null"); + } + + /** + * It creates a new instance from a list containing the object + * + * @param listedObject + * The object contained in the general way (as the JSON parser + * gives us) + */ + public Location(List listedObject) throws JSONException { + initLocation(listedObject); + } + + /** + * Creates an instance of this class from a JSON representation + * + * @param plain + * JSON representation of an object of this class + */ + public Location(JSONString plain) throws JSONException { + List parsed = (List) JSONParser.parseJSON(plain); + + if (parsed == null) + throw new IllegalArgumentException( + "The location specification is null"); + else { + initLocation(parsed); + } + } + + /** + * Convenience method that initializes the Location + */ + private void initLocation(List listedObject) throws 
JSONException { + if (listedObject.size() != 2) + throw new IllegalArgumentException("Incorrect list's length"); + + // It gets the striping policy + Map policyCandidate = (Map) listedObject + .get(0); + if (policyCandidate == null) + throw new IllegalArgumentException("The striping policy is null"); + + policy = StripingPolicy.readFromJSON(policyCandidate); + + // It gets the OSD list + List osdListCandidate = (List) listedObject.get(1); + + if (osdListCandidate == null) + throw new IllegalArgumentException("The list of replicas is null"); + else if (osdListCandidate.size() != policy.getWidth()) + throw new IllegalArgumentException( + "The number of replicas in the list is wrong"); + + osdList = new ArrayList(osdListCandidate.size()); + for (String osdUUID : osdListCandidate) { + osdList.add(new ServiceUUID(osdUUID)); + } + } + + /** + * Provides the responsible OSD for this object. + * + * @param objectID + * @return + */ + public ServiceUUID getOSDByObject(long objectID) { + return osdList.get(policy.getOSDByObject(objectID)); + } + + /** + * Provides the responsible OSD for this offset. + * + * @param objectID + * @return + */ + public ServiceUUID getOSDByOffset(long offset) { + return osdList.get(policy.getOSDByOffset(offset)); + } + + /** + * Provides the responsible OSD for this byte-range. Returns only a value, + * if the byte-range is saved on one OSD. 
+ * + * @param firstByte + * @param lastByte + * @return null, if the byte-range covers multiple objects on different OSDs + */ + public ServiceUUID getOSDByByteRange(long firstByte, long lastByte) { + List objectRange = policy.getObjects(firstByte, lastByte); + if (objectRange.size() > 1) { + // throw exception, because byte range covers multiple objects + // throw new + // NoSuchElementException("byte range covers multiple objects"); + return null; + } else + return getOSDByObject(objectRange.get(0).objectNumber); + } + + /** + * It provides the list of OSDs of the location + * + * @return The list of OSDs of the object + */ + public List getOSDs() { + return osdList; + } + + /** + * Number of OSDs which contain data of this replica. + * + * @return + */ + public int getWidth() { + return this.osdList.size(); + } + + /** + * Resolves the UUID of all OSDs + * + * @throws UnknownUUIDException + */ + void resolve() throws UnknownUUIDException { + for (ServiceUUID uuid : osdList) { + uuid.resolve(); + } + } + + /** + * checks if this replica location belongs to the OSD + * + * @param uuid + * @return + */ + public boolean containsOSD(ServiceUUID uuid) { + return osdList.contains(uuid); + } + + /** + * It provides the striping policy of this object + * + * @return The striping policy of the object + */ + public StripingPolicy getStripingPolicy() { + return policy; + } + + /** + * It provides a listed representation of the object + * + * @return The representation of this object like a list suitable for JSON + */ + public List asList() { + List returnValue = new ArrayList(2); + returnValue.add(policy.asMap()); + + List osds = new ArrayList(osdList.size()); + for (ServiceUUID osd : osdList) { + osds.add(osd.toString()); + } + + returnValue.add(osds); + + return returnValue; + } + + /** + * It gives a JSON string which represents the object. 
+ * + * @return The string representing the object + */ + public JSONString asJSONString() throws JSONException { + return new JSONString(JSONParser.writeJSON(asList())); + } + + /** + * Provides the used update policy. + */ + public String getReplicaUpdatePolicy() { + return this.replicaUpdatePolicy; + } + + /** + * @param replicaUpdatePolicy + * the replicaUpdatePolicy to set + */ + public void setReplicaUpdatePolicy(String replicaUpdatePolicy) { + assert (replicaUpdatePolicy.equals(REPLICA_UPDATE_SYNC) || replicaUpdatePolicy + .equals(REPLICA_UPDATE_ONDEMAND)); + this.replicaUpdatePolicy = replicaUpdatePolicy; + } + + public boolean equals(Object obj) { + if (this == obj) + return true; + if ((obj == null) || (obj.getClass() != this.getClass())) + return false; + + Location other = (Location) obj; + return policy.equals(other.policy) && osdList.equals(other.osdList); + } + + public int hashCode() { + return policy.hashCode() + osdList.hashCode(); + } + + @Override + public String toString() { + return osdList.toString() + " ; " + policy; + } + + /* + * old code + */ + public int indexOf(ServiceUUID osdId) { + return osdList.indexOf(osdId); + } + +} diff --git a/servers/src/org/xtreemfs/common/striping/Locations.java b/servers/src/org/xtreemfs/common/striping/Locations.java new file mode 100644 index 0000000000000000000000000000000000000000..fff6df73465855e37333d3c205a12c2a4875cae2 --- /dev/null +++ b/servers/src/org/xtreemfs/common/striping/Locations.java @@ -0,0 +1,292 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB), Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC) + */ + +package org.xtreemfs.common.striping; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.common.uuids.UnknownUUIDException; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; + +/** + * It models the list of locations of replicas + * + * @author clorenz + */ +public class Locations implements Iterable { + /** + * update policies + */ + public static final String REPLICA_UPDATE_POLICY_SYNC = "sync"; + public static final String REPLICA_UPDATE_POLICY_ONDEMAND = "lazy"; + + /** + * used update policy for all replicas + */ + private String replicaUpdatePolicy; + /** + * it defines how many replicas must be updated synchronous, if update + * policy is "sync", + */ + private int replicaUpdatePolicySyncLevel; + + /** + * version of the Locations-list + */ + private final long version; + + /** + * list of replicas + */ + private final List replicas; + + /** + * It creates an instance of Locations with an existing list. It uses the + * "ondemand"-policy as default. 
+ * + * @param locations + * List of replica's location + */ + public Locations(List locations) { + this(locations, 1, REPLICA_UPDATE_POLICY_ONDEMAND, 0); + } + + /** + * It creates an instance of Locations with an existing list. + * + * @param locations + * List of replica's location + * @param version + * version of locations-list + * @param replicaUpdatePolicy + * policy which will be used + * @param replicaSyncLevel + * how many replicas must be updated synchronous, if update + * policy is "sync" + */ + public Locations(List locations, long version, + String replicaUpdatePolicy, int replicaSyncLevel) { + if (locations == null) + throw new IllegalArgumentException("The list of replicas is null"); + else if (locations.size() == 0) + throw new IllegalArgumentException( + "There is no replicas in the list"); + + this.replicas = locations; + this.version = version; + this.replicaUpdatePolicy = replicaUpdatePolicy; + this.replicaUpdatePolicySyncLevel = replicaSyncLevel; + } + + /** + * Creates an instance of this class from a JSON representation + * + * @param plain + * JSON representation of an object of this class + */ + public Locations(JSONString plain) throws JSONException { + List list = (List) JSONParser.parseJSON(plain); + + if (list == null) + throw new IllegalArgumentException("The list of replicas is null"); + if (list.size() < 2) + throw new IllegalArgumentException("Locations list is not valid."); + + this.version = (Long) list.get(1); + + List> xLocList = (List>) list.get(0); + this.replicas = new ArrayList(xLocList.size()); + for (int i = 0; i < xLocList.size(); i++) + this.replicas.add(new Location((List) xLocList.get(i))); + + if (list.size() >= 3) + parseRepUpdatePolicy((String) list.get(2)); + else + replicaUpdatePolicy = REPLICA_UPDATE_POLICY_ONDEMAND; + } + + /** + * parses the JSON-update-policy-string + * + * @param rp + */ + private void parseRepUpdatePolicy(String rp) { + // parse the replication policy + int sepIndex = rp.indexOf(':'); + if 
(sepIndex == -1) { + replicaUpdatePolicy = rp; + replicaUpdatePolicySyncLevel = replicas.size(); + } else { + // TODO: conform to the specification: don't allow "lazy:5" + replicaUpdatePolicy = rp.substring(0, sepIndex); + replicaUpdatePolicySyncLevel = Integer.parseInt(rp + .substring(sepIndex + 1)); + if (replicaUpdatePolicySyncLevel > replicas.size()) // all sync + replicaUpdatePolicySyncLevel = replicas.size(); + } + } + + /** + * It provides a list representing the object + * + * @return The listed representation of the object + */ + public List asList() { + List returnValue = new ArrayList(replicas.size()); + for (Location loc : replicas) { + returnValue.add(loc.asList()); + } + return returnValue; + } + + /** + * It provides a JSONString representing the object + * + * @return The JSONString representation of the object + */ + public JSONString asJSONString() throws JSONException { + List args = new ArrayList(3); + args.add(asList()); + args.add(version); + if (replicaUpdatePolicy.equals(REPLICA_UPDATE_POLICY_SYNC) + && replicaUpdatePolicySyncLevel != replicas.size()) + args.add(replicaUpdatePolicy + ":" + replicaUpdatePolicySyncLevel); + else + args.add(replicaUpdatePolicy); + return new JSONString(JSONParser.writeJSON(args)); + } + + /** + * It provides the location related to an OSD + * + * @param osd + * OSD to locate + * @return The replica location where the osd is taking part. + */ + public Location getLocation(ServiceUUID osd) { + for (Location loc : replicas) { + if (loc.containsOSD(osd)) + return loc; + } + return null; + } + + /** + * It provides the location of the specified index + * + * @param index + * @return + */ + public Location getLocation(int index) { + return replicas.get(index); + } + + /** + * Provides a list of OSDs which are containing replicas of the given object. + * NOTE: If the replicas use different striping policies the same object must not contain the same data. 
+ * @param objectID + * @return + */ + public List getOSDsByObject(long objectID){ + List osds = new ArrayList(); + for(Location loc : replicas){ + osds.add(loc.getOSDByObject(objectID)); + } + return osds; + } + + /** + * Resolves the UUID of all OSDs + * + * @throws UnknownUUIDException + */ + public void resolveAll() throws UnknownUUIDException { + for (Location loc : this.replicas) { + loc.resolve(); + } + } + + /** + * Provides the number, how many replicas are used. + * + * @return + */ + public int getNumberOfReplicas() { + return replicas.size(); + } + + public boolean equals(Object obj) { + if (this == obj) + return true; + if ((obj == null) || (obj.getClass() != this.getClass())) + return false; + + Locations other = (Locations) obj; + return replicas.equals(other.replicas); + } + + public int hashCode() { + return replicas.hashCode(); + } + + /** + * Provides the version of the locations-list. + */ + public long getVersion() { + return version; + } + + /** + * Provides the used update policy. + * @return + */ + public String getReplicaUpdatePolicy() { + return replicaUpdatePolicy; + } + + /** + * Provides how many replicas must be updated synchronous, if the used update policy is "sync". 
+ * @return + */ + public int getReplicaSyncLevel() { + return replicaUpdatePolicySyncLevel; + } + + @Override + public Iterator iterator() { + return replicas.iterator(); + } + + @Override + public String toString() { + return "version: " + version + " ; " + replicas.toString() + " ; " + + replicaUpdatePolicy + ":" + replicaUpdatePolicySyncLevel; + } +} diff --git a/servers/src/org/xtreemfs/common/striping/RAID0.java b/servers/src/org/xtreemfs/common/striping/RAID0.java new file mode 100644 index 0000000000000000000000000000000000000000..809bbdb8615310412a4fe9e666294bec9f1fc022 --- /dev/null +++ b/servers/src/org/xtreemfs/common/striping/RAID0.java @@ -0,0 +1,202 @@ +/* Copyright (c) 2008 Barcelona Supercomputing Center - Centro Nacional + de Supercomputacion and Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Christian Lorenz (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.common.striping; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; + +/** + * RAID 0 + * + * @author clorenz + */ +public final class RAID0 extends StripingPolicy { + protected static final long KILOBYTE = 1024L; + + /** + * used as key for JSON + */ + public static final String POLICY_NAME = "RAID0"; + /** + * used as key for JSON + */ + protected static final String JSON_STRIPE_SIZE_TOKEN = "stripe-size"; + + protected final long stripeSize; + + /** + * Creates a new instance of RAID0 + * + * @param size + * Size of the stripes in kilobytes (1kB == 1024 bytes) + * @param width + * Number of OSDs where the file will be striped + */ + public RAID0(long size, long width) { + super(width); + + if (size <= 0) + throw new IllegalArgumentException("size must be > 0"); + + this.stripeSize = size; + } + + @Override + public long getStripeSize(long objID) { + return this.stripeSize * KILOBYTE; + } + + @Override + public long getRow(long objId) { + return objId / this.width; + } + + /** + * It generates an object from a given map of names and values + * + * @param translater + * Map containing a RAID0 object like a set of pairs (name, + * value) + * @return The object contained in the map + */ + public static RAID0 readFromJSON(Map translater) + throws JSONException { + String name = (String) translater.get(JSON_STRIPING_POLICY_TOKEN); + + if (name.equals(POLICY_NAME)) { + Object tmp = translater.get(JSON_STRIPE_SIZE_TOKEN); + if (tmp == null) + throw new JSONException(JSON_STRIPE_SIZE_TOKEN + + " argument is missing"); + long size = (Long) tmp; + + tmp = translater.get(JSON_WIDTH_TOKEN); + if (tmp == null) + throw new 
JSONException(JSON_WIDTH_TOKEN + + " argument is missing"); + long width = (Long) tmp; + + return new RAID0(size, width); + } else + throw new JSONException("[ E | RAID0 ] Bad striping policy name"); + } + + @Override + public JSONString asJSONString() throws JSONException { + return new JSONString(JSONParser.writeJSON(asMap())); + } + + @Override + public Map asMap() { + Map returnValue = new HashMap(); + returnValue.put(JSON_STRIPING_POLICY_TOKEN, POLICY_NAME); + returnValue.put(JSON_STRIPE_SIZE_TOKEN, stripeSize); + returnValue.put(JSON_WIDTH_TOKEN, getWidth()); + + return returnValue; + } + + @Override + public String toString() { + return POLICY_NAME + " with " + this.width + " width and " + + this.stripeSize + "kb stripe-size"; + } + + @Override + public long getObject(long offset) { + return (offset / this.stripeSize) / KILOBYTE; + } + + @Override + public long getFirstByte(long object) { + return object * this.stripeSize * KILOBYTE; + } + + @Override + public long getLastByte(long object) { + return getFirstByte(object + 1) - 1; + } + + @Override + public List getObjects(long firstByte, long lastByte) { + ArrayList list = new ArrayList(2); + long objectID, relativeFirstByte, relativeLastByte, osd; + + // first object + objectID = getObject(firstByte); + relativeFirstByte = firstByte - getFirstByte(objectID); + relativeLastByte = ((relativeFirstByte + (lastByte - firstByte)) < stripeSize + * KILOBYTE) ? 
(relativeFirstByte + (lastByte - firstByte)) + : (stripeSize * KILOBYTE - 1); + osd = getOSDByObject(objectID); + + StripeInfo start = new StripeInfo(objectID, osd, relativeFirstByte, + relativeLastByte); + list.add(start); + + if ((objectID = getObject(lastByte)) != start.objectNumber) { // multiple + // objects + // last object + relativeFirstByte = 0L; + relativeLastByte = lastByte - getFirstByte(objectID); + osd = getOSDByObject(objectID); + + StripeInfo end = new StripeInfo(objectID, osd, relativeFirstByte, + relativeLastByte); + list.add(end); + } + return list; + } + + @Override + public int getOSDByObject(long object) { + return (int) (object % this.width); + } + + @Override + public int getOSDByOffset(long offset) { + return getOSDByObject(getObject(offset)); + } + + @Override + public String getPolicyName() { + return POLICY_NAME; + } + + /* + * old code + */ + @Override + public boolean isLocalObject(long objId, long osdNo) { + return objId % getWidth() == osdNo - 1; + } +} diff --git a/servers/src/org/xtreemfs/common/striping/StripeInfo.java b/servers/src/org/xtreemfs/common/striping/StripeInfo.java new file mode 100644 index 0000000000000000000000000000000000000000..723ce9e00e75e94a100cac6dd90a6b1c67e0e6b3 --- /dev/null +++ b/servers/src/org/xtreemfs/common/striping/StripeInfo.java @@ -0,0 +1,86 @@ +/* Copyright (c) 2008 Barcelona Supercomputing Center - Centro Nacional + de Supercomputacion and Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jesús Malo (BSC), Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.common.striping; + +/** + * It encapsulates the information related to a stripe + * + * @author Jesús Malo (jmalo) + */ +public class StripeInfo { + public final Long objectNumber; // Relative object number + public final Long OSD; // Relative osd number + public final Long firstByte; // Relative first byte offset + public final Long lastByte; // Relative last byte offset + + /** + * Creates a new instance of StripeInfo + * + * @param r + * Relative object number + * @param o + * Relative OSD position (it begins at 0) + * @param f + * Relative offset of the first byte of the stripe + * @param l + * Relative offset of the last byte of the stripe + * @pre (r >= 0) && (o >= 0) && (f >= 0) && (l >= 0) + */ + public StripeInfo(long r, long o, long f, long l) { + assert ((r >= 0) && (o >= 0) && (f >= 0) && (l >= 0)) : "r = " + r + + ", o = " + o + ", f = " + f + ", l = " + l; + + objectNumber = Long.valueOf(r); + OSD = Long.valueOf(o); + firstByte = Long.valueOf(f); + lastByte = Long.valueOf(l); + } + + public boolean equals(Object obj) { + + if (this == obj) + return true; + + if ((obj == null) || (obj.getClass() != this.getClass())) + return false; + + final StripeInfo toCompare = (StripeInfo) obj; + return objectNumber.equals(toCompare.objectNumber) + && OSD.equals(toCompare.OSD) + && firstByte.equals(toCompare.firstByte) + && lastByte.equals(toCompare.lastByte); + } + + public int hashCode() { + return objectNumber.hashCode() + OSD.hashCode() + firstByte.hashCode() + + lastByte.hashCode(); + } + + @Override + public 
String toString() { + return "StripeInfo: object "+objectNumber+" on osd "+OSD+" with bytes from "+firstByte+" to "+lastByte; + } +} diff --git a/servers/src/org/xtreemfs/common/striping/StripingPolicy.java b/servers/src/org/xtreemfs/common/striping/StripingPolicy.java new file mode 100644 index 0000000000000000000000000000000000000000..331f785cd9c00fc196aaaa6a65ab3e08395f0af2 --- /dev/null +++ b/servers/src/org/xtreemfs/common/striping/StripingPolicy.java @@ -0,0 +1,255 @@ +/* Copyright (c) 2008 Barcelona Supercomputing Center - Centro Nacional + de Supercomputacion and Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.common.striping; + +import java.util.List; +import java.util.Map; + +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; + +/** + * It models the StripingPolicy. 
/* Copyright (c) 2008 Barcelona Supercomputing Center - Centro Nacional
   de Supercomputacion and Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.

   This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
   Grid Operating System, see <http://www.xtreemos.eu> for more details.
   The XtreemOS project has been developed with the financial support of the
   European Commission's IST program under contract #FP6-033576.

   XtreemFS is free software: you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation, either version 2 of the License, or (at your option)
   any later version.

   XtreemFS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
 */
/*
 * AUTHORS: Christian Lorenz (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), Jan Stender (ZIB)
 */

package org.xtreemfs.common.striping;

import java.util.List;
import java.util.Map;

import org.xtreemfs.foundation.json.JSONException;
import org.xtreemfs.foundation.json.JSONParser;
import org.xtreemfs.foundation.json.JSONString;

/**
 * Abstract base class for striping policies, i.e. the mapping between byte
 * offsets of a file and the objects/OSDs that store those bytes.
 *
 * NOTE(review): generic type parameters of {@code Map}/{@code List} appear to
 * have been stripped from this copy of the file; raw types are kept here so
 * that subclass signatures (e.g. RAID0) remain override-compatible.
 *
 * @author clorenz
 */
public abstract class StripingPolicy {

    /** JSON key under which the policy name is stored. */
    protected static final String JSON_STRIPING_POLICY_TOKEN = "policy";

    /** JSON key under which the stripe width is stored. */
    protected static final String JSON_WIDTH_TOKEN = "width";

    /** width (number of involved OSDs) */
    protected final long width;

    /**
     * Creates a policy with the given width.
     *
     * @param w number of OSDs this policy distributes objects over; must be > 0
     * @throws IllegalArgumentException if {@code w <= 0}
     */
    protected StripingPolicy(long w) {
        if (w <= 0)
            throw new IllegalArgumentException("width must be > 0");

        this.width = w;
    }

    /**
     * Provides the number of OSDs of this striping policy.
     *
     * @return the width; always greater than zero
     */
    public long getWidth() {
        return width;
    }

    /**
     * Returns the name of the policy.
     */
    public abstract String getPolicyName();

    /**
     * Returns the ID of the last object of a file with the given size.
     *
     * @param fileSize file size in bytes
     * @return the object number containing byte {@code fileSize - 1}
     */
    public long calculateLastObject(long fileSize) {
        return getObject(fileSize - 1);
    }

    /**
     * Convenience method for getting the size of a stripe in bytes.
     *
     * @param objID the object whose stripe size is requested
     * @return the number of bytes of the stripe
     */
    public long getStripeSize(long objID) {
        // FIX: was "1 + getFirstByte(objID) - getLastByte(objID)", which is
        // negative for any stripe larger than one byte; the size of the
        // inclusive range [first, last] is last - first + 1.
        return getLastByte(objID) - getFirstByte(objID) + 1;
    }

    public int hashCode() {
        return asMap().hashCode();
    }

    public boolean equals(Object obj) {

        if (this == obj)
            return true;
        if ((obj == null) || (obj.getClass() != this.getClass()))
            return false;

        StripingPolicy other = (StripingPolicy) obj;

        // compare the JSON representations; note that a JSON encoding failure
        // surfaces as IllegalArgumentException (pre-existing behavior)
        JSONString Iam, ItIs;
        try {
            Iam = asJSONString();
            ItIs = other.asJSONString();
        } catch (JSONException ex) {
            throw new IllegalArgumentException();
        }

        return Iam.equals(ItIs);
    }

    /**
     * Generates a mapped representation of this object.
     *
     * @return the mapped representation of the object
     */
    public abstract Map asMap();

    /**
     * Gives a JSON string which represents the object.
     *
     * @return the string representing the object
     * @throws JSONException if the policy cannot be encoded
     */
    public abstract JSONString asJSONString() throws JSONException;

    /**
     * Parses a JSON string and recovers the striping policy contained in it.
     *
     * @param plain the string containing a striping policy
     * @return the policy contained in {@code plain}
     * @throws JSONException if parsing fails or the policy is unknown
     */
    public static StripingPolicy readFromJSON(JSONString plain)
        throws JSONException {
        Map translater = (Map) JSONParser.parseJSON(plain);

        return readFromJSON(translater);
    }

    /**
     * Recovers the striping policy contained in a parsed JSON map.
     *
     * @param mappedObject the map containing a striping policy
     * @return the policy contained in {@code mappedObject}
     * @throws JSONException if the map contains no known policy
     */
    public static StripingPolicy readFromJSON(Map mappedObject)
        throws JSONException {
        StripingPolicy translation;

        if (mappedObject.containsKey(JSON_STRIPING_POLICY_TOKEN)) {
            String selector = (String) mappedObject
                    .get(JSON_STRIPING_POLICY_TOKEN);

            // add here additional striping policies
            if (selector.equals(RAID0.POLICY_NAME))
                translation = RAID0.readFromJSON(mappedObject);
            else
                throw new JSONException("Unknown striping policy: " + selector);
        } else
            throw new JSONException("There is no striping policy in the object");

        return translation;
    }

    /**
     * Provides the corresponding object for this byte offset.
     */
    public abstract long getObject(long offset);

    /**
     * Provides the first byte (inclusive) of the given object.
     */
    public abstract long getFirstByte(long object);

    /**
     * Provides the last byte (inclusive) of the given object.
     */
    public abstract long getLastByte(long object);

    /**
     * Returns a list of all needed information about the objects which
     * represent this byte range. If the byte range only covers one object,
     * there will be only one entry, otherwise 2 entries: the first contains
     * the object where the byte range starts and the second the object where
     * it ends.
     */
    public abstract List getObjects(long firstByte, long lastByte);

    /**
     * Provides the OSD position in this row for the given offset.
     */
    public abstract int getOSDByOffset(long offset);

    /**
     * Provides the OSD position in this row for the given object.
     */
    public abstract int getOSDByObject(long object);

    /**
     * Provides the containing row of the object.
     */
    public abstract long getRow(long absObjId);

    /**
     * Returns all needed information about where the data of the given object
     * is positioned in the other striping policy. Useful for re-striping.
     *
     * @param localObjectID objectID for THIS striping policy
     * @param otherPolicy striping policy for which the data should be converted
     * @return see method "getObjects(long firstByte, long lastByte)"
     */
    public List getOtherObjects(long localObjectID, StripingPolicy otherPolicy) {
        return otherPolicy.getObjects(this.getFirstByte(localObjectID),
            this.getLastByte(localObjectID));
    }

    /*
     * old code
     */
    public abstract boolean isLocalObject(long absObjId, long relOsdNo);
}
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */

package org.xtreemfs.common.trace;

import java.io.FileOutputStream;
import java.io.IOException;
import org.xtreemfs.common.logging.Logging;

/**
 * Appends one semicolon-separated record per traced request event to a trace
 * file. Disabled by default via {@link #COLLECT_TRACES}.
 *
 * @author bjko
 */
public class Tracer {

    /**
     * Set this to true to enable the trace log file for all requests.
     * ATTENTION: MUST BE SET TO FALSE FOR NORMAL OPERATIONS.
     */
    public static final boolean COLLECT_TRACES = false;

    /** Event types; the char is the event column written to the trace file. */
    public enum TraceEvent {

        RECEIVED('>'),
        RESPONSE_SENT('<'),
        ERROR_SENT('E');

        private final char eventType;

        TraceEvent(char eventType) {
            this.eventType = eventType;
        }

        public char getEventType() {
            return this.eventType;
        }
    }

    // singleton set as a side effect of the private constructor;
    // NOTE(review): calling initialize() twice replaces the instance without
    // closing the previous stream — confirm callers initialize only once
    private static Tracer theInstance;

    private final String traceFileName;

    private final FileOutputStream fos;

    private Tracer(String traceFileName) throws IOException {
        this.traceFileName = traceFileName;
        theInstance = this;

        // append mode; a header line is written on every initialization
        fos = new FileOutputStream(traceFileName, true);
        Logging.logMessage(Logging.LEVEL_INFO, this,
            "TRACING IS ENABLED, THIS WILL CAUSE PERFORMANCE TO BE REDUCED!");
        fos.write("#requestId;internal rq sequence no;event;component;message\n".getBytes());
    }

    /**
     * Initializes the tracer.
     *
     * @param traceFileName file name to write trace data to (append mode)
     * @throws java.io.IOException if the file cannot be opened
     */
    public static void initialize(String traceFileName) throws IOException {
        new Tracer(traceFileName);
    }

    /**
     * Formats and writes a single record: requestId;seqNo;event;component;message.
     * Null fields are written as empty columns; I/O errors are logged, not thrown.
     */
    private void writeTraceRecord(String requestId, long intRqSeqNo, TraceEvent event,
        String component, String message) {

        // StringBuilder instead of StringBuffer: no shared mutation here
        StringBuilder sb = new StringBuilder();

        if (requestId != null)
            sb.append(requestId);

        sb.append(';');
        sb.append(intRqSeqNo);
        sb.append(';');
        sb.append(event.getEventType());
        sb.append(';');
        if (component != null)
            sb.append(component);
        sb.append(';');
        if (message != null)
            sb.append(message);
        sb.append("\n");
        try {
            fos.write(sb.toString().getBytes());
        } catch (IOException ex) {
            Logging.logMessage(Logging.LEVEL_ERROR, this, ex);
        }
    }

    /**
     * Writes a trace record via the singleton instance.
     * {@link #initialize(String)} must have been called before.
     */
    public static void trace(String requestId, long intRqSeqNo, TraceEvent event,
        String component, String message) {
        assert (theInstance != null) : "Tracer not initialized";
        theInstance.writeTraceRecord(requestId, intRqSeqNo, event, component, message);
    }

    @Override
    public void finalize() {
        // best-effort close on GC; errors are deliberately ignored
        try {
            fos.close();
        } catch (IOException ex) {
        }
    }

}
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and
   Barcelona Supercomputing Center - Centro Nacional de Supercomputacion.

   This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
   Grid Operating System, see <http://www.xtreemos.eu> for more details.
   The XtreemOS project has been developed with the financial support of the
   European Commission's IST program under contract #FP6-033576.

   XtreemFS is free software: you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation, either version 2 of the License, or (at your option)
   any later version.

   XtreemFS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
 */
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
 */

package org.xtreemfs.common.util;

import java.io.File;
import java.io.FileFilter;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.List;

/**
 * A class containing helper functions for working with the local file system.
 *
 * @author stender
 */
public class FSUtils {

    /**
     * Recursively deletes the given file or directory and all its contents.
     * Non-existing paths are ignored; individual delete failures are silently
     * skipped (pre-existing best-effort semantics).
     *
     * @param file the file or directory to delete
     */
    public static void delTree(File file) {

        if (!file.exists())
            return;

        // FIX: listFiles() returns null for plain files (and on I/O errors);
        // the original dereferenced it unconditionally and threw an NPE when
        // delTree was invoked on a non-directory
        File[] children = file.listFiles();
        if (children != null) {
            for (File f : children) {
                if (f.isDirectory())
                    delTree(f);
                else
                    f.delete();
            }
        }

        file.delete();
    }

    /**
     * Copies a whole directory tree to another directory.
     *
     * @param srcFile the source tree
     * @param trgFile the target point where to copy the source tree
     * @throws IOException if an I/O error occurs
     */
    public static void copyTree(File srcFile, File trgFile) throws IOException {

        if (srcFile.isDirectory()) {

            trgFile.mkdir();
            for (File file : srcFile.listFiles())
                copyTree(file, new File(trgFile, file.getName()));

        } else {

            FileChannel in = null, out = null;

            // single try/finally; the original nested an identical finally
            // block inside another, closing both channels twice
            try {
                in = new FileInputStream(srcFile).getChannel();
                out = new FileOutputStream(trgFile).getChannel();

                long size = in.size();
                MappedByteBuffer buf = in.map(FileChannel.MapMode.READ_ONLY, 0, size);

                // loop: a single write() is not guaranteed to drain the buffer
                while (buf.hasRemaining())
                    out.write(buf);

            } finally {
                if (in != null)
                    in.close();
                if (out != null)
                    out.close();
            }
        }
    }

    /**
     * Returns the free disk space on the partition storing the given directory.
     * The former native-'stat' path was fully commented out and only produced
     * misleading debug messages; the Java mechanism is used directly instead.
     *
     * @param dir the directory stored in the partition
     * @return the free disk space (for non-privileged users)
     */
    public static long getFreeSpace(String dir) {
        return new File(dir).getUsableSpace();
    }

    /**
     * Recursively collects all files under {@code rootDir} accepted by
     * {@code filter}, depth-first (files in subdirectories come first).
     */
    public static File[] listRecursively(File rootDir, FileFilter filter) {
        List<File> list = new ArrayList<File>();
        listRecursively(rootDir, filter, list);
        return list.toArray(new File[list.size()]);
    }

    // accumulator variant; appends matches of subdirectories before those of
    // rootDir itself
    private static void listRecursively(File rootDir, FileFilter filter, List<File> list) {

        if (!rootDir.exists())
            return;

        // first, all files in subdirectories
        File[] nestedDirs = rootDir.listFiles(new FileFilter() {
            public boolean accept(File pathname) {
                return pathname.isDirectory();
            }
        });

        if (nestedDirs != null)
            for (File dir : nestedDirs)
                listRecursively(dir, filter, list);

        File[] matches = rootDir.listFiles(filter);
        if (matches != null)
            for (File f : matches)
                list.add(f);
    }
}
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.

   This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
   Grid Operating System, see <http://www.xtreemos.eu> for more details.
   The XtreemOS project has been developed with the financial support of the
   European Commission's IST program under contract #FP6-033576.

   XtreemFS is free software: you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation, either version 2 of the License, or (at your option)
   any later version.

   XtreemFS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
 */
/*
 * AUTHORS: Jan Stender (ZIB)
 */
package org.xtreemfs.common.util;

import java.io.IOException;
import java.net.InetAddress;
import java.net.InterfaceAddress;
import java.net.NetworkInterface;
import java.util.Enumeration;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import org.xtreemfs.common.clients.RPCClient;

public class NetUtils {

    /**
     * Returns a list of address-mapping maps for all reachable network
     * endpoints. Globally reachable addresses are preferred; if none exists,
     * the first site-local address found is used instead.
     *
     * @param port the port to assign to the mappings
     * @param protocol the protocol for the endpoint
     * @return a list of mappings (keys: address, port, protocol, ttl, match_network)
     * @throws IOException if the network interfaces cannot be enumerated
     */
    public static List<Map<String, Object>> getReachableEndpoints(int port, String protocol)
        throws IOException {

        List<Map<String, Object>> endpoints = new LinkedList<Map<String, Object>>();

        // first, try to find a globally reachable endpoint
        Enumeration<NetworkInterface> ifcs = NetworkInterface.getNetworkInterfaces();
        while (ifcs.hasMoreElements()) {

            NetworkInterface ifc = ifcs.nextElement();

            for (InterfaceAddress addr : ifc.getInterfaceAddresses()) {

                InetAddress inetAddr = addr.getAddress();

                // loopback and link-local addresses can never be global
                if (inetAddr.isLoopbackAddress() || inetAddr.isLinkLocalAddress())
                    continue;

                // neither link-local nor site-local => assumed globally reachable
                if (!(inetAddr.isLinkLocalAddress() || inetAddr.isSiteLocalAddress())) {
                    endpoints.add(RPCClient.generateMap("address", inetAddr.getHostAddress(),
                        "port", port, "protocol", protocol, "ttl", 3600, "match_network", "*"));
                    break;
                }
            }

            // stop searching for endpoints if an endpoint has been found
            if (!endpoints.isEmpty())
                break;
        }

        // if no globally reachable endpoint is available, pick the first
        // site-local endpoint
        if (endpoints.isEmpty()) {

            // FIX: the original obtained the address list of the first
            // interface only, then looped on hasMoreElements() without ever
            // calling nextElement() inside the loop — an infinite loop
            // whenever that interface had no site-local address. Each
            // interface is now visited exactly once.
            ifcs = NetworkInterface.getNetworkInterfaces();
            while (ifcs.hasMoreElements()) {

                NetworkInterface ifc = ifcs.nextElement();

                for (InterfaceAddress addr : ifc.getInterfaceAddresses()) {

                    InetAddress inetAddr = addr.getAddress();

                    if (inetAddr.isSiteLocalAddress()) {
                        endpoints.add(RPCClient.generateMap("address", inetAddr.getHostAddress(),
                            "port", port, "protocol", protocol, "ttl", 3600, "match_network", "*"));
                        break;
                    }
                }

                if (!endpoints.isEmpty())
                    break;
            }
        }

        return endpoints;
    }

    /**
     * Converts a network prefix length to a dotted-decimal subnet mask,
     * e.g. 24 -> "255.255.255.0". Currently unused (kept for prospective
     * subnet-based endpoint matching).
     */
    private static String getSubnetMaskString(short prefixLength) {

        long addr = (0xFFFFFFFFL << (32 - prefixLength)) & 0xFFFFFFFFL;
        StringBuffer sb = new StringBuffer();
        for (int i = 3; i >= 0; i--) {
            sb.append((addr & (0xFF << (i * 8))) >> (i * 8));
            if (i > 0)
                sb.append(".");
        }

        return sb.toString();
    }

    /** Diagnostic entry point: dumps all interfaces and the chosen endpoints. */
    public static void main(String[] args) throws Exception {

        System.out.println("all network interfaces: ");
        Enumeration<NetworkInterface> ifcs = NetworkInterface.getNetworkInterfaces();
        while (ifcs.hasMoreElements()) {
            for (InterfaceAddress addr : ifcs.nextElement().getInterfaceAddresses()) {
                InetAddress inetAddr = addr.getAddress();
                System.out.println(inetAddr + ", loopback: " + inetAddr.isLoopbackAddress()
                    + ", linklocal: " + inetAddr.isLinkLocalAddress() + ", reachable: "
                    + inetAddr.isReachable(1000));
            }
        }

        System.out.println("\nsuitable network interfaces: ");
        for (Map<String, Object> endpoint : NetUtils.getReachableEndpoints(32640, "http"))
            System.out.println(endpoint);
    }

}
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and
   Barcelona Supercomputing Center - Centro Nacional de Supercomputacion.

   This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
   Grid Operating System, see <http://www.xtreemos.eu> for more details.
   The XtreemOS project has been developed with the financial support of the
   European Commission's IST program under contract #FP6-033576.

   XtreemFS is free software: you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation, either version 2 of the License, or (at your option)
   any later version.

   XtreemFS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
 */
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
 */

package org.xtreemfs.common.util;

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

/**
 * Formatting helpers: hex encoding/decoding, stack-trace rendering,
 * human-readable byte counts and XML entity escaping.
 *
 * @author bjko
 */
public final class OutputUtils {

    /** Uppercase hex digits, indexed by nibble value. */
    public static final char[] trHex = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A',
        'B', 'C', 'D', 'E', 'F' };

    /** Renders one byte as two uppercase hex digits. */
    public static final String byteToHexString(byte b) {
        StringBuilder sb = new StringBuilder(2);
        sb.append(trHex[((b >> 4) & 0x0F)]);
        sb.append(trHex[(b & 0x0F)]);
        return sb.toString();
    }

    /** Renders a byte array as an unseparated uppercase hex string. */
    public static final String byteArrayToHexString(byte[] array) {
        StringBuilder sb = new StringBuilder(2 * array.length);
        for (byte b : array) {
            sb.append(trHex[((b >> 4) & 0x0F)]);
            sb.append(trHex[(b & 0x0F)]);
        }
        return sb.toString();
    }

    /**
     * Renders a byte array as hex, grouped 4 bytes per word and 16 bytes per
     * line (hexdump-like layout).
     */
    public static final String byteArrayToFormattedHexString(byte[] array) {
        StringBuilder sb = new StringBuilder(2 * array.length);
        for (int i = 0; i < array.length; i++) {
            sb.append(trHex[((array[i] >> 4) & 0x0F)]);
            sb.append(trHex[(array[i] & 0x0F)]);
            if (i % 4 == 3) {
                if (i % 16 == 15)
                    sb.append("\n");
                else
                    sb.append(" ");
            }

        }
        return sb.toString();
    }

    /** Renders a throwable's stack trace into a String; null yields "". */
    public static final String stackTraceToString(Throwable th) {

        PrintStream ps = null;
        try {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            ps = new PrintStream(out);
            if (th != null)
                th.printStackTrace(ps);

            return new String(out.toByteArray());

        } finally {
            if (ps != null)
                ps.close();
        }

    }

    /**
     * Formats a byte count with the largest binary unit >= 1.0
     * (e.g. "1.50 MB"); counts below 1 kB are printed as "N bytes".
     */
    public static String formatBytes(long bytes) {

        double kb = bytes / 1024.0;
        double mb = bytes / (1024.0 * 1024.0);
        double gb = bytes / (1024.0 * 1024.0 * 1024.0);
        double tb = bytes / (1024.0 * 1024.0 * 1024.0 * 1024.0);

        if (tb >= 1.0) {
            return String.format("%.2f TB", tb);
        } else if (gb >= 1.0) {
            return String.format("%.2f GB", gb);
        } else if (mb >= 1.0) {
            return String.format("%.2f MB", mb);
        } else if (kb >= 1.0) {
            return String.format("%.2f kB", kb);
        } else {
            return bytes + " bytes";
        }
    }

    /**
     * Escapes the five XML special characters to their predefined entities.
     * FIX: the replacement targets had degraded into identity replacements
     * (entities were lost); '&' must be escaped first so that the other
     * entities' ampersands are not double-escaped.
     */
    public static String escapeToXML(String st) {
        st = st.replace("&", "&amp;");
        st = st.replace("'", "&apos;");
        st = st.replace("<", "&lt;");
        st = st.replace(">", "&gt;");
        st = st.replace("\"", "&quot;");
        return st;
    }

    /**
     * Inverse of {@link #escapeToXML(String)}.
     * FIX: see escapeToXML; "&amp;" must be resolved last so that escaped
     * entity bodies are not un-escaped twice.
     */
    public static String unescapeFromXML(String st) {
        st = st.replace("&apos;", "'");
        st = st.replace("&lt;", "<");
        st = st.replace("&gt;", ">");
        st = st.replace("&quot;", "\"");
        st = st.replace("&amp;", "&");
        return st;
    }

    /**
     * Parses an even-length hex string into bytes (inverse of
     * {@link #byteArrayToHexString(byte[])}).
     */
    public static byte[] hexStringToByteArray(String hexString) {

        assert (hexString.length() % 2 == 0);
        byte[] bytes = new byte[hexString.length() / 2];

        for (int i = 0; i < hexString.length(); i += 2) {
            int b = Integer.parseInt(hexString.substring(i, i + 2), 16);
            bytes[i / 2] = b >= 128 ? (byte) (b - 256) : (byte) b;
        }

        return bytes;
    }

}
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.

   This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
   Grid Operating System, see <http://www.xtreemos.eu> for more details.
   The XtreemOS project has been developed with the financial support of the
   European Commission's IST program under contract #FP6-033576.

   XtreemFS is free software: you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation, either version 2 of the License, or (at your option)
   any later version.

   XtreemFS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
 */
/*
 * AUTHORS: Björn Kolbeck (ZIB)
 */

package org.xtreemfs.common.uuids;

import java.io.Serializable;
import java.net.InetSocketAddress;
import org.xtreemfs.common.TimeSync;
import org.xtreemfs.common.buffer.ASCIIString;

/**
 * Encapsulates the UUID and InetSocketAddress for a service.
 * Resolution results are cached and re-resolved once the cache entry expires.
 *
 * @author bjko
 */
public final class ServiceUUID implements Serializable {

    private final String uuid;

    // resolved mapping; populated/refreshed by updateMe()
    private InetSocketAddress address;

    private String protocol;

    // local system time until which the cached mapping is valid; 0 = unresolved
    private long validUntil;

    private UUIDCacheEntry cacheEntry;

    // optional per-instance resolver; null means the global singleton is used
    private final UUIDResolver nonSingleton;

    /**
     * Creates a new ServiceUUID.
     * @param uuid the uuid string
     */
    public ServiceUUID(String uuid) {
        this.uuid = uuid;
        this.validUntil = 0;
        this.nonSingleton = null;
    }

    /**
     * Creates a new ServiceUUID with an individual UUIDResolver (rather than
     * the global instance).
     * @param uuid the uuid string
     */
    public ServiceUUID(String uuid, UUIDResolver nonSingleton) {
        this.uuid = uuid;
        this.validUntil = 0;
        this.nonSingleton = nonSingleton;
    }

    /**
     * Creates a new ServiceUUID.
     * @param uuid the uuid string.
     */
    public ServiceUUID(ASCIIString uuid) {
        this(uuid.toString());

    }

    /**
     * Resolves the uuid to an InetSocketAddress and protocol.
     * @throws org.xtreemfs.common.uuids.UnknownUUIDException if the uuid cannot
     * be resolved (not local, no mapping on DIR).
     */
    public void resolve() throws UnknownUUIDException {
        updateMe();
    }

    /**
     * Retrieves the InetSocketAddress for the service.
     * @return the InetSocketAddress of the service
     * @throws org.xtreemfs.common.uuids.UnknownUUIDException if the UUID cannot be resolved
     */
    public InetSocketAddress getAddress() throws UnknownUUIDException {
        if (validUntil > TimeSync.getLocalSystemTime()) {
            cacheEntry.setLastAccess(TimeSync.getLocalSystemTime());
        } else {
            updateMe();
        }
        return address;
    }

    /**
     * Retrieves the protocol (http, https) for the service.
     * @return the protocol of the service
     * @throws org.xtreemfs.common.uuids.UnknownUUIDException if the UUID cannot be resolved
     */
    public String getProtocol() throws UnknownUUIDException {
        if (validUntil > TimeSync.getLocalSystemTime()) {
            cacheEntry.setLastAccess(TimeSync.getLocalSystemTime());
        } else {
            updateMe();
        }
        return protocol;
    }

    /**
     * Returns the full URL of the service.
     * @return the URL of the service
     * @throws org.xtreemfs.common.uuids.UnknownUUIDException if the UUID cannot be resolved
     */
    public String toURL() throws UnknownUUIDException {
        if (validUntil > TimeSync.getLocalSystemTime()) {
            cacheEntry.setLastAccess(TimeSync.getLocalSystemTime());
        } else {
            updateMe();
        }
        return protocol + "://" + address.getHostName() + ":" + address.getPort();
    }

    /**
     * Get details of the UUID mapping.
     * @return details of the UUID mapping.
     */
    public String debugString() {
        return this.uuid + " -> " + this.protocol + " " + this.address + " (still valid for "
            + ((validUntil - TimeSync.getLocalSystemTime()) / 1000) + "s)";
    }

    /**
     * return the UUID string
     * @return UUID string
     */
    public String toString() {
        return this.uuid;
    }

    @Override
    public boolean equals(Object other) {
        // FIX: the original cast and caught only ClassCastException, so
        // equals(null) threw a NullPointerException instead of returning
        // false as required by the Object.equals contract; instanceof
        // handles both null and foreign types
        if (this == other)
            return true;
        if (!(other instanceof ServiceUUID))
            return false;
        return this.uuid.equals(((ServiceUUID) other).uuid);
    }

    @Override
    public int hashCode() {
        return uuid.hashCode();
    }

    /**
     * updates the UUID mapping via UUIDResolver
     * @throws org.xtreemfs.common.uuids.UnknownUUIDException
     */
    private void updateMe() throws UnknownUUIDException {
        if (nonSingleton == null) {
            cacheEntry = UUIDResolver.resolve(this.uuid);
        } else {
            cacheEntry = UUIDResolver.resolve(this.uuid, nonSingleton);
        }
        this.address = cacheEntry.getResolvedAddr();
        this.validUntil = cacheEntry.getValidUntil();
        this.protocol = cacheEntry.getProtocol();
    }
}
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.

   This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
   Grid Operating System, see <http://www.xtreemos.eu> for more details.
   The XtreemOS project has been developed with the financial support of the
   European Commission's IST program under contract #FP6-033576.

   XtreemFS is free software: you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation, either version 2 of the License, or (at your option)
   any later version.

   XtreemFS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
 */
/*
 * AUTHORS: Björn Kolbeck (ZIB)
 */
package org.xtreemfs.common.uuids;

import java.net.InetSocketAddress;
import org.xtreemfs.common.TimeSync;

/**
 * A single cached UUID-to-endpoint mapping held by the UUIDResolver:
 * the resolved socket address and protocol, an expiry timestamp, a
 * last-access timestamp for eviction, and a "sticky" flag that pins the
 * entry in the cache.
 *
 * @author bjko
 */
class UUIDCacheEntry {

    private String            uuid;

    private String            protocol;

    private InetSocketAddress resolvedAddr;

    private long              validUntil;

    private long              lastAccess;

    private boolean           sticky;

    /**
     * Creates a cache entry; the last-access time is initialized to the
     * current local system time.
     */
    public UUIDCacheEntry(String uuid, String protocol, InetSocketAddress resolvedAddr,
        long validUntil) {
        this.uuid = uuid;
        this.protocol = protocol;
        this.resolvedAddr = resolvedAddr;
        this.validUntil = validUntil;
        this.lastAccess = TimeSync.getLocalSystemTime();
    }

    public String getUuid() {
        return uuid;
    }

    public void setUuid(String uuid) {
        this.uuid = uuid;
    }

    public String getProtocol() {
        return protocol;
    }

    public void setProtocol(String protocol) {
        this.protocol = protocol;
    }

    public InetSocketAddress getResolvedAddr() {
        return resolvedAddr;
    }

    public void setResolvedAddr(InetSocketAddress resolvedAddr) {
        this.resolvedAddr = resolvedAddr;
    }

    /** Local system time after which this mapping must be re-resolved. */
    public long getValidUntil() {
        return validUntil;
    }

    public void setValidUntil(long validUntil) {
        this.validUntil = validUntil;
    }

    /** Local system time of the most recent lookup of this entry. */
    public long getLastAccess() {
        return lastAccess;
    }

    public void setLastAccess(long lastAccess) {
        this.lastAccess = lastAccess;
    }

    /** Sticky entries are never evicted from the resolver cache. */
    public boolean isSticky() {
        return sticky;
    }

    public void setSticky(boolean sticky) {
        this.sticky = sticky;
    }
}
b/servers/src/org/xtreemfs/common/uuids/UUIDResolver.java new file mode 100644 index 0000000000000000000000000000000000000000..141a8abbd8e10e641b6744b7c2edf67c8aee0111 --- /dev/null +++ b/servers/src/org/xtreemfs/common/uuids/UUIDResolver.java @@ -0,0 +1,287 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.common.uuids; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.NetUtils; +import org.xtreemfs.foundation.json.JSONException; + +/** + * Resolves UUID to InetSocketAddress+Protocol mappings. 
+ * @author bjko + */ +public final class UUIDResolver extends Thread { + + Map cache; + + protected transient boolean quit; + + protected final DIRClient dir; + + protected final String authString; + + protected final List myNetworks; + + /** + * interval between two cache cleanups/renewals in milliseconds + */ + public final int cacheCleanInterval; + + public final int maxUnusedEntry ; + + protected static transient UUIDResolver theInstance; + + + protected UUIDResolver(DIRClient client, int cacheCleanInterval, int maxUnusedEntry, + boolean singleton) throws JSONException,IOException { + + super("UUID Resolver"); + setDaemon(true); + + cache = new ConcurrentHashMap(); + quit = false; + this.dir = client; + this.maxUnusedEntry = maxUnusedEntry; + this.cacheCleanInterval = cacheCleanInterval; + + if (singleton) { + assert(theInstance == null); + theInstance = this; + } + authString = NullAuthProvider.createAuthString("services", "xtreemfs"); + List> ntwrks = NetUtils.getReachableEndpoints(0, "http"); + myNetworks = new ArrayList(ntwrks.size()); + for (Map network : ntwrks) { + myNetworks.add((String)network.get("match_network")); + } + } + + /** + * Starts the UUIDResolver thread. 
+ * @param client a DIRClient used to resolve non-cached and non-local mappings + * @param cacheCleanInterval the interval between two cleanup/renewals of cache entries (in ms) + * @param maxUnusedEntry the duration for which to keep an unused entry (in ms, should be set to several tens of minutes) + * @throws org.xtreemfs.foundation.json.JSONException + * @throws java.io.IOException + */ + public static synchronized void start(DIRClient client, + int cacheCleanInterval, int maxUnusedEntry) throws JSONException,IOException { + if (theInstance == null) { + new UUIDResolver(client, cacheCleanInterval, maxUnusedEntry,true); + theInstance.start(); + Logging.logMessage(Logging.LEVEL_DEBUG, null,"started UUIDResolver"); + } else { + Logging.logMessage(Logging.LEVEL_INFO, null,"UUIDResolver already running!"); + } + } + + public static synchronized UUIDResolver startNonSingelton(DIRClient client, + int cacheCleanInterval, int maxUnusedEntry) throws JSONException,IOException { + UUIDResolver tmp = new UUIDResolver(client, cacheCleanInterval, maxUnusedEntry,false); + tmp.start(); + return tmp; + } + + public static boolean isRunning() { + return theInstance != null; + } + + static UUIDCacheEntry resolve(String uuid) throws UnknownUUIDException { + assert (theInstance != null); + + UUIDCacheEntry entry = theInstance.cache.get(uuid); + //check if it is still valid + if ((entry != null) && (entry.getValidUntil() > TimeSync.getLocalSystemTime())) { + entry.setLastAccess(TimeSync.getLocalSystemTime()); + return entry; + } + return theInstance.fetchUUID(uuid); + } + + static UUIDCacheEntry resolve(String uuid, UUIDResolver nonSingleton) throws UnknownUUIDException { + + UUIDCacheEntry entry = nonSingleton.cache.get(uuid); + //check if it is still valid + if ((entry != null) && (entry.getValidUntil() > TimeSync.getLocalSystemTime())) { + entry.setLastAccess(TimeSync.getLocalSystemTime()); + return entry; + } + return nonSingleton.fetchUUID(uuid); + } + + UUIDCacheEntry 
fetchUUID(String uuid) throws UnknownUUIDException { + if (dir == null) + throw new UnknownUUIDException("there is no mapping for "+uuid+". Attention: local mode enabled, no remote lookup possible."); + RPCResponse>>> r = null; + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this,"loading uuid mapping for "+uuid); + try { + r = dir.getAddressMapping(uuid, authString); + Logging.logMessage(Logging.LEVEL_DEBUG, this,"sent request to DIR"); + r.waitForResponse(2000); + List> l = r.get().get(uuid); + Logging.logMessage(Logging.LEVEL_DEBUG, this,"received response for "+uuid); + if ((l == null) || (l.size() == 1)) { + Logging.logMessage(Logging.LEVEL_DEBUG, this,"NO UUID MAPPING FOR: "+uuid); + throw new UnknownUUIDException("uuid "+uuid+" is not registered at directory server"); + } + List> mappings = (List>) l.get(1); + for (int i = 0; i < mappings.size(); i++) { + Map addrMapping = mappings.get(i); + final String network = (String)addrMapping.get("match_network"); + if (myNetworks.contains(network) || (network.equals("*"))) { + final String address = (String)addrMapping.get("address"); + final String protocol = (String)addrMapping.get("protocol"); + final int port = (int) ((Long)addrMapping.get("port")).intValue(); + final long validUntil = TimeSync.getLocalSystemTime() + ((Long)addrMapping.get("ttl"))*1000; + final InetSocketAddress endpoint = new InetSocketAddress(address,port); + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this,"matching uuid record found for uuid "+uuid+" with network "+network); + UUIDCacheEntry e = new UUIDCacheEntry(uuid, protocol, endpoint, validUntil); + cache.put(uuid, e); + return e; + } + } + Logging.logMessage(Logging.LEVEL_DEBUG, this,"NO UUID MAPPING FOR: "+uuid); + throw new UnknownUUIDException("there is no matching entry for my network in the uuid address mapping. 
The service at "+uuid+ + " is either not reachable from this machine or the mapping entry is misconfigured."); + } catch (InterruptedException ex) { + throw new UnknownUUIDException("cannot retrieve mapping from server due to IO error: "+ex); + } catch (IOException ex) { + throw new UnknownUUIDException("cannot retrieve mapping from server due to IO error: "+ex); + } catch (Exception ex) { + ex.printStackTrace(); + throw new UnknownUUIDException("cannot retrieve mapping from server due to invalid data sent by the server: "+ex); + } finally { + if (r != null) + r.freeBuffers(); + } + } + + @Override + public void run() { + List updates = new LinkedList(); + do { + Iterator iter = cache.values().iterator(); + while (iter.hasNext()) { + final UUIDCacheEntry entry = iter.next(); + if (entry.isSticky()) + continue; + if (entry.getLastAccess() + maxUnusedEntry < TimeSync.getLocalSystemTime()) { + //dump entry! + iter.remove(); + Logging.logMessage(Logging.LEVEL_DEBUG, this,"removed entry from UUID cache: "+entry.getUuid()); + } else { + //check if update is necessary + if (entry.getValidUntil() < TimeSync.getLocalSystemTime()+cacheCleanInterval) { + //renew entry... + try { + updates.add(fetchUUID(entry.getUuid())); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_WARN, this,"cannot refresh UIID mapping: "+ex); + iter.remove(); + } + } + } + + } + try { + sleep(cacheCleanInterval); + } catch (InterruptedException ex) { + } + } while (!quit); + } + + /** + * Add a UUID which is mapped on localhost + * @param localUUID the UUID to map + * @param port the port to map the UUID to + * @param useSSL defines the protocol + */ + public static void addLocalMapping(String localUUID, int port, boolean useSSL) { + assert(theInstance != null); + + UUIDCacheEntry e = new UUIDCacheEntry(localUUID, + (useSSL ? 
"https" : "http"), + new InetSocketAddress("localhost",port), + Long.MAX_VALUE); + + e.setSticky(true); + theInstance.cache.put(localUUID, e); + } + + public static void addLocalMapping(ServiceUUID uuid, int port, boolean useSSL) { + addLocalMapping(uuid.toString(), port, useSSL); + } + + public static void shutdown(UUIDResolver nonSingleton) { + nonSingleton.quit = true; + nonSingleton.interrupt(); + } + + public static void shutdown() { + if (theInstance != null) { + theInstance.quit = true; + theInstance.interrupt(); + theInstance = null; + Logging.logMessage(Logging.LEVEL_DEBUG, null,"UUIDREsolver shut down"); + } else { + Logging.logMessage(Logging.LEVEL_DEBUG, null,"UUIDREsolver was already shut down or is not running"); + } + } + + public static String getCache() { + StringBuilder sb = new StringBuilder(); + for (UUIDCacheEntry e : theInstance.cache.values()) { + sb.append(e.getUuid()); + sb.append(" -> "); + sb.append(e.getProtocol()); + sb.append(" "); + sb.append(e.getResolvedAddr()); + if (e.isSticky()) { + sb.append(" - STICKY"); + } else { + sb.append(" - valid for "); + sb.append((e.getValidUntil() - TimeSync.getLocalSystemTime())/1000l); + sb.append("s"); + } + sb.append("\n"); + } + return sb.toString(); + } + +} diff --git a/servers/src/org/xtreemfs/common/uuids/UnknownUUIDException.java b/servers/src/org/xtreemfs/common/uuids/UnknownUUIDException.java new file mode 100644 index 0000000000000000000000000000000000000000..0fddb31e54095b8016e8e260aefe2fcd9421754d --- /dev/null +++ b/servers/src/org/xtreemfs/common/uuids/UnknownUUIDException.java @@ -0,0 +1,39 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.common.uuids; + +import java.io.IOException; + +/** + * Thrown when a UUID cannot be mapped to a service's InetSocketAddress and Protoco. + * @author bjko + */ +public class UnknownUUIDException extends IOException { + + public UnknownUUIDException(String message) { + super(message); + } + +} diff --git a/servers/src/org/xtreemfs/dir/DIR.java b/servers/src/org/xtreemfs/dir/DIR.java new file mode 100644 index 0000000000000000000000000000000000000000..673343be34a42a2ef7ae6082fe6f46078035bb5f --- /dev/null +++ b/servers/src/org/xtreemfs/dir/DIR.java @@ -0,0 +1,91 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ +package org.xtreemfs.dir; + +import java.io.IOException; + +import org.xtreemfs.common.logging.Logging; + +/** + * This class can be used to start a new instance of the Directory Service. + * + * @author stender + * + */ +public class DIR { + + /** + * @param args + * the command line arguments + */ + public static void main(String[] args) { + + String configFileName = "../config/dirconfig.properties"; + + if (args.length != 1) { + System.out.println("using default config file " + configFileName); + } else { + configFileName = args[0]; + } + + DIRConfig config = null; + try { + config = new DIRConfig(configFileName); + } catch (IOException ex) { + ex.printStackTrace(); + return; + } + + Logging.start(config.getDebugLevel()); + + Logging + .logMessage(Logging.LEVEL_INFO, null, "JAVA_HOME=" + + System.getProperty("java.home")); + + try { + final RequestController rq = new RequestController(config); + rq.startup(); + + Runtime.getRuntime().addShutdownHook(new Thread() { + @Override + public void run() { + try { + Logging.logMessage(Logging.LEVEL_INFO, this, "received shutdown signal!"); + rq.shutdown(); + Logging.logMessage(Logging.LEVEL_INFO, this, "DIR shotdown complete"); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + }); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, null, ex); + Logging.logMessage(Logging.LEVEL_DEBUG, null, + "System could not start up due to an exception. 
Aborted."); + System.exit(1); + } + + } + +} diff --git a/servers/src/org/xtreemfs/dir/DIRConfig.java b/servers/src/org/xtreemfs/dir/DIRConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..677d1b6a736886c0c66c6387da66dd75df4ca9be --- /dev/null +++ b/servers/src/org/xtreemfs/dir/DIRConfig.java @@ -0,0 +1,68 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.dir; + +import java.io.IOException; +import java.util.Properties; + +import org.xtreemfs.common.config.ServiceConfig; + +/** + * + * @author bjko + */ +public class DIRConfig extends ServiceConfig { + + private String dbDir; + + private String authenticationProvider; + + /** Creates a new instance of OSDConfig */ + public DIRConfig(String filename) throws IOException { + super(filename); + read(); + } + + public DIRConfig(Properties prop) throws IOException { + super(prop); + read(); + } + + public void read() throws IOException { + super.read(); + + this.dbDir = this.readRequiredString("database.dir"); + this.authenticationProvider = readRequiredString("authentication_provider"); + } + + public String getDbDir() { + return dbDir; + } + + public String getAuthenticationProvider() { + return authenticationProvider; + } + +} diff --git a/servers/src/org/xtreemfs/dir/DIRRequest.java b/servers/src/org/xtreemfs/dir/DIRRequest.java new file mode 100644 index 0000000000000000000000000000000000000000..d6fd04b8fed96f4c5d09467f8ab4a8d7bd6d1579 --- /dev/null +++ b/servers/src/org/xtreemfs/dir/DIRRequest.java @@ -0,0 +1,50 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.dir; + +import java.net.InetSocketAddress; + +import org.xtreemfs.common.Request; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.speedy.SpeedyRequest; + +public class DIRRequest extends Request { + + public SpeedyRequest sr; + + public InetSocketAddress srEndpoint; + + public RequestDetails details; + + public DIRRequest() { + this(null); + } + + public DIRRequest(PinkyRequest pr) { + super(pr); + sr = null; + details = new RequestDetails(); + } +} diff --git a/servers/src/org/xtreemfs/dir/DIRRequestListener.java b/servers/src/org/xtreemfs/dir/DIRRequestListener.java new file mode 100644 index 0000000000000000000000000000000000000000..f94218f11031c3aa85c19d431159fca32e5a2daf --- /dev/null +++ b/servers/src/org/xtreemfs/dir/DIRRequestListener.java @@ -0,0 +1,32 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.dir; + + +public interface DIRRequestListener { + + public void dsRequestDone(DIRRequest request); + +} diff --git a/servers/src/org/xtreemfs/dir/DirService.java b/servers/src/org/xtreemfs/dir/DirService.java new file mode 100644 index 0000000000000000000000000000000000000000..53550a60e4074ef075ec484c71f4453e1a5d9349 --- /dev/null +++ b/servers/src/org/xtreemfs/dir/DirService.java @@ -0,0 +1,820 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.dir; + +import java.io.File; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; + +/** + * Implements the functionality provided by the Directory Service. + * + * @author stender + * + */ +public class DirService { + + public enum Attrs { + uuid, version, lastUpdated, owner, type, name, organization, country, uri, publicKey + } + + public static final String TABLE_NAME = "TABLE"; + + public static final String COL_UUID = "UUID"; + + public static final String COL_ATTR = "ATTR"; + + public static final String COL_VAL = "VAL"; + + public static final String COL_MAPPING = "MAPPING"; + + public static final String COL_OWNER = "OWNER"; + + public static final String COL_VERSION = "VERSION"; + + private DIRRequestListener requestListener; + + private final Connection conEntities; + + private final Connection conMappings; + + private Map timestamps; + + /** + * Creates a new Directory Service using a database stored at the given path + * in the local file system tree. 
+ * + * @param dbPath + * the path to the database directory + * @throws SQLException + * if the database could not be initialized properly + */ + public DirService(String dbPath) throws SQLException { + + this.timestamps = new HashMap(); + + try { + Class.forName("org.hsqldb.jdbcDriver"); + } catch (Exception e) { + Logging.logMessage(Logging.LEVEL_ERROR, this, + "ERROR: failed to load HSQLDB JDBC driver."); + throw new RuntimeException(e); + } + + new File(dbPath).mkdirs(); + + Properties info = new Properties(); + info.setProperty("shutdown", "true"); + info.setProperty("user", "sa"); + conEntities = DriverManager.getConnection("jdbc:hsqldb:file:" + dbPath + "/ds-entities", + info); + conEntities.setAutoCommit(true); + + conMappings = DriverManager.getConnection("jdbc:hsqldb:file:" + dbPath + "/ds-mappings", + info); + conMappings.setAutoCommit(true); + + // check whether the entities table exists already + // if the table does not exist yet, create it + if (!tableExists(conEntities)) { + String sql = "CREATE TABLE " + TABLE_NAME + " (" + COL_UUID + " VARCHAR(128) NOT NULL," + + COL_ATTR + " VARCHAR(128) NOT NULL," + COL_VAL + " VARCHAR(1024) NOT NULL);"; + + Statement statement = conEntities.createStatement(); + statement.execute(sql); + statement.close(); + } + + // check whether the mappings table exists already + // if the table does not exist yet, create it + if (!tableExists(conMappings)) { + String sql = "CREATE TABLE " + TABLE_NAME + " (" + COL_UUID + " VARCHAR(128) NOT NULL," + + COL_OWNER + " VARCHAR(128) NOT NULL," + COL_MAPPING + " VARCHAR(1024) NOT NULL," + + COL_VERSION + " INTEGER NOT NULL);"; + + Statement statement = conMappings.createStatement(); + statement.execute(sql); + statement.close(); + } + + } + + public void shutdown() throws SQLException { + conMappings.createStatement().execute("shutdown"); + conMappings.close(); + conEntities.createStatement().execute("shutdown"); + conEntities.close(); + } + + /** + * Registers or updates an 
entity at the Directory Service. + * + *

+ * First, an authorization check is performed. Access is always granted if + * no entity with the given UUID exists yet. If an entity already exists, + * access is only granted if the user ID associated with the request is + * equal to the user ID associated with the existing entity. + * + *

+ * If the request is sufficiently authorized and the entity exists already, + * oldVersion is compared to the version which is currently + * associated with the entity. Unless both version strings are equal, + * registration fails with an error message indicating that an attempt was + * made to update an entry with an outdated version. + * + *

+ * If authorization and version check are successful, all entries given in + * data are atomically updated. This includes a calculation of a + * new version string, as well as an update of the 'lastUpdated' attribute. + * + * @param request + * the request context + * @param uuid + * the UUID of the entity + * @param data + * a map containing attribute-value pairs defining the entity + * @param oldVersion + * the former version number of the entry, which the update + * refers to + * @throws SQLException + * if an error occured while updating the database + * @throws UserException + * if the operation failed due to an invalid argument + */ + public void registerEntity(DIRRequest request, String uuid, Map data, + long oldVersion) throws SQLException, UserException { + + Statement statement = conEntities.createStatement(); + + try { + + conEntities.setAutoCommit(false); + + // check if an owner has already been defined for the entry; + // if so, check if the user requesting the update is authorized to + // modify the entry + boolean ownerExists = false; + + String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_VAL }, + COL_UUID + "='" + uuid + "' and " + COL_ATTR + "='" + Attrs.owner + "'"); + + ResultSet rs = statement.executeQuery(sql); + try { + if (rs.next()) { + ownerExists = true; + String owner = rs.getString(1); + checkAuthorization(owner, request, uuid); + } + } finally { + rs.close(); + } + + // check if the user has the correct version to update + if (ownerExists && !data.isEmpty()) { + sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_VAL }, + COL_UUID + "='" + uuid + "' and " + COL_ATTR + "='" + Attrs.version + "'"); + rs = statement.executeQuery(sql); + + try { + if (rs.next()) { + + // check the version + long versionInDB = rs.getLong(1); + if (versionInDB != oldVersion) + throw new UserException( + "version mismatch: received update for version '" + oldVersion + + "', latest version is '" + versionInDB 
+ "'"); + } + } finally { + rs.close(); + } + } + + long timestamp = System.currentTimeMillis() / 1000; + + // add the owner entry if it does not exist + if (!ownerExists) { + // insert the new attribute-value pair + sql = SQLQueryHelper.createInsertStatement(TABLE_NAME, null, new Object[] { uuid, + Attrs.owner.toString(), request.details.userId }); + statement.executeUpdate(sql); + } + + for (String attr : data.keySet()) { + + if (attr.equals(Attrs.version.toString()) + || attr.equals(Attrs.lastUpdated.toString())) + throw new UserException("invalid attribute name: '" + attr + + "' cannot be changed explicitly"); + + // delete the former attribute-value pair, if existing + sql = SQLQueryHelper.createDeleteStatement(TABLE_NAME, COL_UUID + "='" + uuid + + "' and " + COL_ATTR + "='" + attr + "'"); + statement.executeUpdate(sql); + + // if a value has been assigned, insert the new attribute-value + // pair + String value = (String) data.get(attr); + + if (value != null && value.length() > 0) { + + sql = SQLQueryHelper.createInsertStatement(TABLE_NAME, null, new Object[] { + uuid, attr, value }); + statement.executeUpdate(sql); + } + + } + + // calculate the new version number + long version = oldVersion + 1; + + // delete the former version number + sql = SQLQueryHelper.createDeleteStatement(TABLE_NAME, COL_UUID + "='" + uuid + + "' and " + COL_ATTR + "='" + Attrs.version + "'"); + statement.executeUpdate(sql); + + // update the version number + sql = SQLQueryHelper.createInsertStatement(TABLE_NAME, new String[] { COL_UUID, + COL_ATTR, COL_VAL }, new Object[] { uuid, Attrs.version.toString(), version }); + statement.executeUpdate(sql); + + // update the timestamp + timestamps.put(uuid, timestamp); + + // commit the transaction + conEntities.commit(); + + MessageUtils.marshallResponse(request, version); + this.notifyRequestListener(request); + + } catch (UserException exc) { + conEntities.rollback(); + throw exc; + } catch (SQLException exc) { + conEntities.rollback(); 
+ throw exc; + } finally { + statement.close(); + } + + } + + /** + * Queries the Directory Service. + * + *

+ * The method returns a set of entities in the form of a mapping from UUIDs + * to maps containing sets of attribute-value pairs associated with the + * corresponding UUIDs. + * + *

+ * The set of entities included in the result set is restricted by the query + * map. Only such entities are included that match the query map, i.e. that + * have attribute-value pairs equal to or at least covered by patterns + * contained in the query map. Entities are only included in the result set + * if each attribute from the query map is also attached to the entity. + * Similarly, each value mapped by an attribute in the query map must also + * be mapped by an attribute attached to the entity, with the exception that + * an asterisk ('*') indicates that any value is allowed. + * + *

+ * The attributes included in the result set are restricted by the given + * list of attributes. If this list is null or empty, all + * attributes of all matching entities are included. + * + * @param request + * the request context + * @param queryMap + * a mapping defining the query + * @param attrs + * a set of attributes to which all entities included in the + * result set are reduced + * + * @throws SQLException + * if an error occured while querying the database + */ + public void getEntities(DIRRequest request, Map queryMap, List attrs) + throws SQLException { + + // TODO: check whether some fancy SQL statement will perform this task + // more efficiently + + Statement statement = conEntities.createStatement(); + + try { + + // first, get a list of potential UUIDs which might belong to the + // query + // result; most probably, this will significantly reduce the amount + // of + // entries to be checked in the second step + StringBuffer sb = new StringBuffer(); + for (String key : queryMap.keySet()) { + + String value = (String) queryMap.get(key); + + if (sb.length() != 0) + sb.append("OR "); + + if (key.equalsIgnoreCase(Attrs.uuid.toString()) && !value.equals("*")) { + sb.append("("); + sb.append(COL_UUID); + sb.append("='"); + sb.append(value); + sb.append("')"); + continue; + } + + if (!key.equalsIgnoreCase(Attrs.lastUpdated.toString())) { + + sb.append("("); + sb.append(COL_ATTR); + sb.append("='"); + sb.append(key); + sb.append("'"); + + if (!value.equals("*")) { + sb.append(" AND "); + sb.append(COL_VAL); + sb.append("='"); + sb.append(value); + sb.append("'"); + } + + sb.append(")"); + } + + } + + String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_UUID }, + sb.toString()); + ResultSet rs = statement.executeQuery(sql); + + Set uuids = new HashSet(); + try { + while (rs.next()) + uuids.add(rs.getString(1)); + } finally { + rs.close(); + } + + // for each potential entry, check whether all requirements are + // fulfilled; if 
so, add the entry to the result set + Map> result = new HashMap>(); + for (String uuid : uuids) { + + // get all entities with the given UUID from the database + sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_ATTR, + COL_VAL }, COL_UUID + "='" + uuid + "'"); + rs = statement.executeQuery(sql); + + // add all entries from the database + Map entity = new HashMap(); + entity.put(Attrs.uuid.toString(), uuid); + try { + while (rs.next()) + entity.put(rs.getString(1), rs.getString(2)); + } finally { + rs.close(); + } + + // add the lastUpdated entry from the timestamp map if it exists + if (timestamps.containsKey(uuid)) + entity.put(Attrs.lastUpdated.toString(), timestamps.get(uuid).toString()); + + // if the entry matches the query map, remove all + // attribute-value + // pairs not defined in 'attrs' and add the resulting entry to + // the + // result set + if (matches(entity, queryMap)) { + + if (attrs == null || attrs.size() == 0) + result.put(uuid, entity); + + else { + + // prune the result set with the aid of 'attrs' + + Map prunedEntry = new HashMap(); + for (String key : attrs) { + String value = (String) entity.get(key); + if (value != null) + prunedEntry.put(key, value); + } + + result.put(uuid, prunedEntry); + } + } + } + + MessageUtils.marshallResponse(request, result); + this.notifyRequestListener(request); + + } finally { + statement.close(); + } + } + + /** + * Deregisters an entity from the Directory Service. + * + *

+ * If an entity with the given UUID exists, a check is performed whether the + * user ID associated with the request is equal to the user ID associated + * with the database entry. The deregistration will only be performed if + * both user IDs match. + * + * @param request + * the request context + * @param uuid + * the UUID of the entity to remove + * @throws SQLException + * if an error occured while updating the database + * @throws UserException + * if the operation failed due to an invalid argument + */ + public void deregisterEntity(DIRRequest request, String uuid) throws SQLException, UserException { + + conEntities.setAutoCommit(true); + Statement statement = conEntities.createStatement(); + + // check if an owner has already been defined for the entry; + // if so, check if the user requesting the deletion is the owner + String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_VAL }, + COL_UUID + "='" + uuid + "' and " + COL_ATTR + "='" + Attrs.owner + "'"); + ResultSet rs = statement.executeQuery(sql); + + try { + + if (rs.next()) { + String owner = rs.getString(1); + checkAuthorization(owner, request, uuid); + } + + // delete all attribute-value pairs associated with the UUID from + // the database + sql = SQLQueryHelper.createDeleteStatement(TABLE_NAME, COL_UUID + "='" + uuid + "'"); + statement.executeUpdate(sql); + + // remove last timestamp from the hash map + timestamps.remove(uuid); + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } finally { + rs.close(); + statement.close(); + } + } + + public void registerAddressMapping(DIRRequest request, String uuid, + List> mapping, long oldVersion) throws SQLException, UserException { + + Statement statement = conMappings.createStatement(); + + try { + + conEntities.setAutoCommit(false); + + // First, check whether a mapping has been registered already. If + // so, check whether the requesting user is authorized to change the + // mapping. 
+ String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_OWNER, + COL_VERSION }, COL_UUID + "='" + uuid + "'"); + + ResultSet rs = statement.executeQuery(sql); + long versionInDB = 0; + String owner = null; + try { + if (rs.next()) { + + // check whether user is authorized + owner = rs.getString(1); + checkAuthorization(owner, request, uuid); + + // check whether version is correct + versionInDB = rs.getLong(2); + if (versionInDB != oldVersion) + throw new UserException("version mismatch: received update for version '" + + oldVersion + "', latest version is '" + versionInDB + "'"); + } + } finally { + rs.close(); + } + + // do not change the owner if the entry exists already + if(owner == null) + owner = request.details.userId; + + // delete the old mapping, if necessary + sql = SQLQueryHelper.createDeleteStatement(TABLE_NAME, COL_UUID + "='" + uuid + "'"); + statement.executeUpdate(sql); + + // add the new mapping + sql = SQLQueryHelper.createInsertStatement(TABLE_NAME, new String[] { COL_UUID, + COL_OWNER, COL_MAPPING, COL_VERSION }, new Object[] { uuid, owner, + JSONParser.writeJSON(mapping), oldVersion + 1 }); + statement.executeUpdate(sql); + + // commit the transaction + conEntities.commit(); + + MessageUtils.marshallResponse(request, oldVersion + 1); + this.notifyRequestListener(request); + + } catch (UserException exc) { + conEntities.rollback(); + throw exc; + } catch (SQLException exc) { + conEntities.rollback(); + throw exc; + } catch (JSONException exc) { + conEntities.rollback(); + throw new UserException("cannot convert map to JSON: " + exc); + } finally { + statement.close(); + } + } + + public void getAddressMapping(DIRRequest request, String uuid) throws SQLException, + JSONException { + + Statement statement = conMappings.createStatement(); + Map> results = new HashMap>(); + + // get all entries + if (uuid.equals("")) { + + // query all mappings + String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { 
COL_UUID, + COL_VERSION, COL_MAPPING }, null); + + ResultSet rs = statement.executeQuery(sql); + try { + while (rs.next()) { + List result = new ArrayList(2); + result.add(rs.getLong(2)); // version + result.add(JSONParser.parseJSON(new JSONString(rs.getString(3)))); // mapping + + results.put(rs.getString(1), result); + } + } finally { + rs.close(); + } + + } else { + + // query the mapping with the given UUID + String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_UUID, + COL_VERSION, COL_MAPPING }, COL_UUID + "='" + uuid + "'"); + ResultSet rs = statement.executeQuery(sql); + + try { + if (rs.next()) { + List result = new ArrayList(3); + result.add(rs.getLong(2)); // version + result.add(JSONParser.parseJSON(new JSONString(rs.getString(3)))); // mapping + // / + // / + // mapping + + results.put(rs.getString(1), result); + } + } finally { + rs.close(); + } + } + + MessageUtils.marshallResponse(request, results); + this.notifyRequestListener(request); + } + + public void deregisterAddressMapping(DIRRequest request, String uuid) throws SQLException, + UserException { + + conEntities.setAutoCommit(true); + + // First, check whether a mapping has been registered already. If + // so, check whether the requesting user is authorized to change the + // mapping. 
+ Statement statement = conMappings.createStatement(); + String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_OWNER }, + COL_UUID + "='" + uuid + "'"); + ResultSet rs = statement.executeQuery(sql); + + try { + if (rs.next()) { + String owner = rs.getString(1); + checkAuthorization(owner, request, uuid); + } + + // delete the mapping + sql = SQLQueryHelper.createDeleteStatement(TABLE_NAME, COL_UUID + "='" + uuid + "'"); + statement.executeUpdate(sql); + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } finally { + rs.close(); + statement.close(); + } + } + + /** + * Returns the current system time in milliseconds since 1/1/70 + */ + public void getGlobalTime(DIRRequest request) { + MessageUtils.marshallResponse(request, System.currentTimeMillis()); + this.notifyRequestListener(request); + } + + public void setRequestListener(DIRRequestListener listener) { + requestListener = listener; + } + + protected void notifyRequestListener(DIRRequest request) { + if (requestListener != null) + requestListener.dsRequestDone(request); + else + throw new RuntimeException("listener must not be null!"); + } + + protected Map> getEntityDBDump() throws SQLException { + + Statement statement = conEntities.createStatement(); + String sql = "SELECT * FROM " + TABLE_NAME; + ResultSet rs = statement.executeQuery(sql); + + Map> dump = new HashMap>(); + + for (int i = 0; rs.next(); i++) { + String uuid = rs.getString(1); + Map entry = dump.get(uuid); + if (entry == null) { + entry = new HashMap(); + dump.put(uuid, entry); + } + + entry.put(rs.getString(2), rs.getString(3)); + + if (!entry.containsKey(Attrs.lastUpdated.toString())) { + Object timeStamp = timestamps.get(uuid); + if (timeStamp != null) + entry.put(Attrs.lastUpdated.toString(), timeStamp.toString()); + } + } + + rs.close(); + + return dump; + } + + protected Map getMappingDBDump() throws SQLException, JSONException { + + Statement statement = 
conMappings.createStatement(); + String sql = "SELECT * FROM " + TABLE_NAME; + ResultSet rs = statement.executeQuery(sql); + + Map dump = new HashMap(); + + for (int i = 0; rs.next(); i++) { + String uuid = rs.getString(1); + dump.put(uuid, new Object[] { rs.getString(2), + JSONParser.parseJSON(new JSONString(rs.getString(3))), rs.getLong(4) + "" }); + } + + rs.close(); + + return dump; + } + + /** + * Checks whether a given entity matches a given query map. + * + * @param entity + * @param query + * @return + */ + private boolean matches(Map entity, Map query) { + + for (String key : query.keySet()) { + + String value = (String) query.get(key); + + if (!entity.containsKey(key) + || (!value.equals("*") && !value.equals((String) entity.get(key)))) + return false; + } + + return true; + } + + private static boolean tableExists(Connection con) throws SQLException { + + boolean exists = false; + + Statement statement = con.createStatement(); + try { + ResultSet rs = statement.executeQuery("SELECT * FROM " + TABLE_NAME + ";"); + rs.close(); + exists = true; + } catch (SQLException exc) { + if (exc.getErrorCode() != -22) // table does not exist + throw exc; + } finally { + statement.close(); + } + + return exists; + } + + private static void checkAuthorization(String owner, DIRRequest request, String uuid) + throws UserException { + + if (owner.equals(request.details.userId) || request.details.superUser) + return; + + throw new UserException("authorization failure: '" + uuid + "' is owned by '" + owner + + "', but attempted to be modified by '" + request.details.userId + + "'. 
Entries may only be modified by their owner or a superuser."); + } + + // public static void main(String[] args) throws Exception { + // + // DirService ds = new DirService("/tmp/dirservice/ds"); + // + // // register a new entity + // Map data = new HashMap(); + // data.put("someKey", "someValue"); + // data.put("anotherKey", "anotherValue"); + // DSRequest req = new DSRequest(); + // req.userId = "me"; + // ds.registerEntity(req, "theUUID", data, 0L); + // + // data.clear(); + // data.put("someKey", "bla"); + // ds.registerEntity(req, "anotherUUID", data, 0L); + // + // // query the Directory Service + // Map query = new HashMap(); + // List keys = new LinkedList(); + // keys.add("lastUpdated"); + // query.put("someKey", "*"); + // query.put("anotherKey", "anotherValue"); + // + // // Map> result = ds.getEntities(req, query, + // // keys); + // // System.out.println(result); + // + // // deregister the entity + // ds.deregisterEntity(req, "theUUID"); + // ds.deregisterEntity(req, "anotherUUID"); + // // + // // // query again + // // result = ds.getEntities(req, new HashMap(), keys); + // // System.out.println(result); + // + // ds.shutdown(); + // } + +} diff --git a/servers/src/org/xtreemfs/dir/DirServiceStage.java b/servers/src/org/xtreemfs/dir/DirServiceStage.java new file mode 100644 index 0000000000000000000000000000000000000000..184c82baabdb21b5b6906e65d801fbc272f2442e --- /dev/null +++ b/servers/src/org/xtreemfs/dir/DirServiceStage.java @@ -0,0 +1,222 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.dir; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.LinkedBlockingQueue; + +import org.xtreemfs.common.auth.AuthenticationException; +import org.xtreemfs.common.auth.AuthenticationProvider; +import org.xtreemfs.common.auth.UserCredentials; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.LifeCycleThread; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.mrc.brain.ErrNo; +import org.xtreemfs.mrc.brain.UserException; + +public class DirServiceStage extends LifeCycleThread { + + private final LinkedBlockingQueue queue; + + private final DirService dirService; + + private final AuthenticationProvider auth; + + public DirServiceStage(String dbDir, AuthenticationProvider auth) throws SQLException { + + super("Directory Service"); + this.auth = auth; + + dirService = new DirService(dbDir); + queue = new LinkedBlockingQueue(); + } + + public void processRequest(DIRRequest request) { + queue.add(request); + } + + public void shutdown() throws SQLException { + interrupt(); + dirService.shutdown(); + } + + public void 
setRequestListener(DIRRequestListener listener) { + assert (listener != null); + dirService.setRequestListener(listener); + } + + public void run() { + + try { + + notifyStarted(); + + for (;;) { + + DIRRequest request = null; + try { + if (isInterrupted()) + break; + request = queue.take(); + } catch (InterruptedException e1) { + break; + } + + try { + + Object args = MessageUtils.unmarshallRequest(request); + + // parse the user Id from the "AUTHORIZATION" header + if (request.details.userId == null) { + String authHeader = request.getPinkyRequest().requestHeaders + .getHeader(HTTPHeaders.HDR_AUTHORIZATION); + + if (authHeader == null) + throw new UserException(ErrNo.EPERM, "authorization mechanism required"); + + UserCredentials cred = null; + try { + cred = auth.getEffectiveCredentials(authHeader, request.getPinkyRequest() + .getChannelIO()); + request.details.superUser = cred.isSuperUser(); + request.details.userId = cred.getUserID(); + } catch (AuthenticationException ex) { + throw new UserException(ErrNo.EPERM, ex.getMessage()); + } + } + + executeCommand(request, args); + + } catch (Exception exc) { + MessageUtils.marshallException(request, exc); + dirService.notifyRequestListener(request); + } + + } + + } catch (Throwable th) { + notifyCrashed(th instanceof Exception ? 
(Exception) th : new Exception(th)); + return; + } + + notifyStopped(); + } + + private void executeCommand(DIRRequest request, Object args) { + + try { + + // convert the arguments to a corresponding object array + Object[] argArray = null; + if (args != null) + try { + List argList = (List) args; + argList.add(0, request); + argArray = argList.toArray(); + } catch (ClassCastException exc) { + argArray = new Object[] { request, args }; + } + + // find the appropriate brain method + Method m = findMethod(request.getPinkyRequest().requestURI, argArray); + + // invoke the brain method + if (args == null) + m.invoke(dirService, request); + else + m.invoke(dirService, argArray); + + } catch (InvocationTargetException exc) { + MessageUtils.marshallException(request, exc.getCause()); + dirService.notifyRequestListener(request); + } catch (Exception exc) { + MessageUtils.marshallException(request, exc); + dirService.notifyRequestListener(request); + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + } + } + + private Method findMethod(String name, Object[] args) throws NoSuchMethodException { + + Method[] methods = dirService.getClass().getMethods(); + Method m = null; + + for (Method method : methods) { + + if (method.getName().equals(name)) { + + Class[] paramTypes = method.getParameterTypes(); + if (args.length > 1 && args.length != paramTypes.length) + continue; + + boolean ok = true; + // TODO: check params + // for (int i = 0; i < paramTypes.length; i++) { + // + // if (argsArray[i] != null + // && !paramTypes[i].isInstance(argsArray[i])) { + // ok = false; + // } + // } + + if (ok) { + m = method; + break; + } + } + } + + if (m == null) { + + List list = new ArrayList(args.length); + for (Object arg : args) + if (!(arg instanceof DIRRequest)) + list.add(arg); + + String argList = null; + try { + argList = JSONParser.writeJSON(list); + } catch (JSONException exc) { + exc.printStackTrace(); + } + + throw new NoSuchMethodException("could not find appropriate method 
'" + name + + "' for arguments " + argList); + } + + return m; + } + + protected Object[] getDBDump() throws SQLException, JSONException { + return new Object[] { dirService.getEntityDBDump(), dirService.getMappingDBDump() }; + } + +} diff --git a/servers/src/org/xtreemfs/dir/MessageUtils.java b/servers/src/org/xtreemfs/dir/MessageUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..efefac26088c5a92deb2abfb100f5868c4aa3054 --- /dev/null +++ b/servers/src/org/xtreemfs/dir/MessageUtils.java @@ -0,0 +1,176 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.dir; + +import java.net.InetSocketAddress; +import java.nio.CharBuffer; +import java.util.HashMap; +import java.util.Map; + +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.OutputUtils; +import org.xtreemfs.foundation.json.JSONCharBufferString; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.HTTPUtils.DATA_TYPE; + +/** + * Routines for marshalling and unmarshalling JSON in request bodies. + * + * @author bjko + */ +public class MessageUtils { + + public static void marshallResponse(DIRRequest req, Object res) { + + assert (req != null); + + try { + ReusableBuffer bbuf = ReusableBuffer.wrap(JSONParser.writeJSON(res).getBytes( + HTTPUtils.ENC_UTF8)); + req.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY, bbuf, HTTPUtils.DATA_TYPE.JSON); + + } catch (JSONException exc) { + marshallException(req, exc); + } + } + + public static void marshallResponse(DIRRequest req, Object res, HTTPHeaders additionalHeaders) { + + assert (req != null); + + try { + ReusableBuffer bbuf = ReusableBuffer.wrap(JSONParser.writeJSON(res).getBytes( + HTTPUtils.ENC_UTF8)); + req.getPinkyRequest() + .setResponse(HTTPUtils.SC_OKAY, bbuf, HTTPUtils.DATA_TYPE.JSON, + additionalHeaders); + + } catch (JSONException exc) { + marshallException(req, exc); + } + } + + public static void marshallException(DIRRequest req, Map excMap, + boolean userException) { + try { + + ReusableBuffer body = ReusableBuffer.wrap(JSONParser.writeJSON(excMap).getBytes( + HTTPUtils.ENC_UTF8)); + + req.getPinkyRequest().setResponse(userException ? 
HTTPUtils.SC_USER_EXCEPTION + : HTTPUtils.SC_SERVER_ERROR, body, HTTPUtils.DATA_TYPE.JSON); + } catch (JSONException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, null, ex); + req.getPinkyRequest().setResponse(HTTPUtils.SC_SERVER_ERROR); + } + } + + public static void marshallException(DIRRequest req, Throwable exc) { + + String stackTrace = null; + + // encapsulate the stack trace in a string, unless the exception is a + // user exception + if (!(exc instanceof UserException)) + stackTrace = OutputUtils.stackTraceToString(exc); + + Map excMap = new HashMap(); + excMap.put("exceptionName", exc.toString()); + excMap.put("errorMessage", exc.getMessage()); + excMap.put("stackTrace", stackTrace); + if (exc instanceof UserException) + excMap.put("errno", ((UserException) exc).getErrno()); + + marshallException(req, excMap, exc instanceof UserException); + } + + public static void setRedirect(DIRRequest req, String target) { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_LOCATION); + req.getPinkyRequest().setResponse(HTTPUtils.SC_SEE_OTHER, null, DATA_TYPE.JSON, headers); + } + + public static Object unmarshallRequestOld(DIRRequest request) throws JSONException { + String body = null; + + assert (request != null); + assert (request.getPinkyRequest() != null); + + if (request.getPinkyRequest().requestBody != null) { + byte bdy[] = null; + if (request.getPinkyRequest().requestBody.hasArray()) { + bdy = request.getPinkyRequest().requestBody.array(); + } else { + bdy = new byte[request.getPinkyRequest().requestBody.capacity()]; + request.getPinkyRequest().requestBody.position(0); + request.getPinkyRequest().requestBody.get(bdy); + } + + body = new String(bdy, HTTPUtils.ENC_UTF8); + return JSONParser.parseJSON(new JSONString(body)); + } else { + return null; + } + + } + + public static Object unmarshallRequest(DIRRequest request) throws JSONException { + String body = null; + + assert (request != null); + assert (request.getPinkyRequest() != 
null); + + if (request.getPinkyRequest().requestBody != null) { + request.getPinkyRequest().requestBody.position(0); + CharBuffer utf8buf = HTTPUtils.ENC_UTF8.decode(request.getPinkyRequest().requestBody.getBuffer()); + return JSONParser.parseJSON(new JSONCharBufferString(utf8buf)); + } else { + return null; + } + + } + + public static InetSocketAddress addrFromString(String hostAndPort) + throws IllegalArgumentException { + int dpoint = hostAndPort.lastIndexOf(':'); + if (dpoint == -1) { + throw new IllegalArgumentException("InetSocketAddress as String needs a : character"); + } + String host = hostAndPort.substring(0, dpoint); + int port = 0; + try { + port = Integer.valueOf(hostAndPort.substring(dpoint + 1)); + } catch (NumberFormatException ex) { + throw new IllegalArgumentException("Port is not a number in " + hostAndPort); + } + return new InetSocketAddress(host, port); + } + +} diff --git a/servers/src/org/xtreemfs/dir/RequestController.java b/servers/src/org/xtreemfs/dir/RequestController.java new file mode 100644 index 0000000000000000000000000000000000000000..8b311f36b29f61f995b327270f086a4af18a3230 --- /dev/null +++ b/servers/src/org/xtreemfs/dir/RequestController.java @@ -0,0 +1,399 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.dir; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.FileWriter; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.sql.SQLException; +import java.util.Collections; +import java.util.Date; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.common.auth.AuthenticationProvider; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.OutputUtils; +import org.xtreemfs.foundation.LifeCycleListener; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.pinky.PinkyRequestListener; +import org.xtreemfs.foundation.pinky.PipelinedPinky; +import org.xtreemfs.foundation.pinky.SSLOptions; + +/** + * This class comtains the workflow of the MRC server and directs the requestst + * to the appropriate stages + * + * @author bjko + */ +public class RequestController implements PinkyRequestListener, DIRRequestListener, + LifeCycleListener { + + private PipelinedPinky pinkyStage; + + private DirServiceStage dirServiceStage; + + private DIRConfig config; + + private int _stat_numRequests; + + private final String statusPageTemplate; + + private enum Vars { + MAXMEM(""), + FREEMEM(""), + AVAILPROCS(""), + BPSTATS(""), + PORT(""), + DEBUG(""), + NUMCON(""), + PINKYQ(""), + NUMREQS(""), + TIME(""), + TABLEDUMP(""); + + private String template; + + Vars(String template) { + this.template = template; + } + + public String toString() { + return template; + } + } + + /** Creates a new 
instance of RequestController */ + public RequestController(DIRConfig config) throws Exception { + + try { + this.config = config; + + final AuthenticationProvider auth = (AuthenticationProvider) Class.forName( + config.getAuthenticationProvider()).newInstance(); + auth.initialize(config.isUsingSSL()); + + /** set up all stages */ + + dirServiceStage = new DirServiceStage(config.getDbDir(), auth); + dirServiceStage.setRequestListener(this); + dirServiceStage.setLifeCycleListener(this); + + pinkyStage = config.isUsingSSL() ? new PipelinedPinky(config.getPort(), config + .getAddress(), this, new SSLOptions(config.getServiceCredsFile(), config + .getServiceCredsPassphrase(), config.getServiceCredsContainer(), config + .getTrustedCertsFile(), config.getTrustedCertsPassphrase(), config + .getTrustedCertsContainer(), false)) : new PipelinedPinky(config.getPort(), + config.getAddress(), this); + pinkyStage.setLifeCycleListener(this); + + /** load status page template */ + + StringBuffer sb = null; + try { + InputStream is = this.getClass().getClassLoader().getResourceAsStream( + "org/xtreemfs/dir/templates/status.html"); + if (is == null) + is = this.getClass().getResourceAsStream("../templates/status.html"); + BufferedReader br = new BufferedReader(new InputStreamReader(is)); + sb = new StringBuffer(); + String line = br.readLine(); + while (line != null) { + sb.append(line + "\n"); + line = br.readLine(); + } + br.close(); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, ex); + } + if (sb == null) { + statusPageTemplate = "

Template was not found, unable to show status page!

"; + } else { + statusPageTemplate = sb.toString(); + } + + Logging.logMessage(Logging.LEVEL_INFO, this, + "[ I | DIR ] operational, listening on port " + config.getPort()); + + } catch (Exception exc) { + shutdown(); + throw exc; + } + } + + public void startup() throws Exception { + + try { + dirServiceStage.start(); + pinkyStage.start(); + + pinkyStage.waitForStartup(); + dirServiceStage.waitForStartup(); + + } catch (Exception exc) { + shutdown(); + throw exc; + } + } + + public void shutdown() throws Exception { + + // create status page snapshot for debugging purposes + try { + String statusPageSnapshot = getStatusPage(); + BufferedWriter writer = new BufferedWriter(new FileWriter(config.getDbDir() + + "/.status.html")); + writer.write(statusPageSnapshot); + writer.close(); + } catch (Exception exc) { + // ignore + } + + if (pinkyStage != null) + pinkyStage.shutdown(); + if (dirServiceStage != null) + dirServiceStage.shutdown(); + + if (pinkyStage != null) + pinkyStage.waitForShutdown(); + if (dirServiceStage != null) + dirServiceStage.waitForShutdown(); + } + + // --------------------- LISTENERS ------------------------------ + + public void receiveRequest(PinkyRequest theRequest) { + + DIRRequest rq = new DIRRequest(theRequest); + + try { + if (theRequest.requestURI.charAt(0) == '/') { + + if (theRequest.requestURI.length() == 1) { + + // generate status HTTP page + String statusPage = getStatusPage(); + + ReusableBuffer bbuf = ReusableBuffer.wrap(statusPage + .getBytes(HTTPUtils.ENC_ASCII)); + theRequest.setResponse(HTTPUtils.SC_OKAY, bbuf, HTTPUtils.DATA_TYPE.HTML); + pinkyStage.sendResponse(rq.getPinkyRequest()); + return; + + } else + theRequest.requestURI = theRequest.requestURI.substring(1); + } + + if (theRequest.requestURI.length() > 0) { + if (theRequest.requestURI.charAt(0) == '.') { + // system command + handleSystemCall(rq); + } else { + try { + // delegate to the Directory Service Stage + dirServiceStage.processRequest(rq); + } catch 
(IllegalStateException e) { + // queue is full + theRequest.setClose(true); + theRequest.setResponse(HTTPUtils.SC_SERV_UNAVAIL); + pinkyStage.sendResponse(theRequest); + } + + } + } else { + theRequest.setClose(true); + theRequest.setResponse(HTTPUtils.SC_BAD_REQUEST); + pinkyStage.sendResponse(theRequest); + } + + } catch (IndexOutOfBoundsException e) { + theRequest.setClose(true); + theRequest.setResponse(HTTPUtils.SC_BAD_REQUEST); + pinkyStage.sendResponse(theRequest); + } catch (Exception e) { + theRequest.setClose(true); + theRequest.setResponse(HTTPUtils.SC_SERVER_ERROR); + pinkyStage.sendResponse(theRequest); + } + + } + + public void handleSystemCall(DIRRequest rq) { + try { + if (rq.getPinkyRequest().requestURI.equals(".shutdown")) { + // shutdown the whole Directory Service + rq.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY); + pinkyStage.sendResponse(rq.getPinkyRequest()); + shutdown(); + } else { + rq.getPinkyRequest().setResponse(HTTPUtils.SC_NOT_IMPLEMENTED); + pinkyStage.sendResponse(rq.getPinkyRequest()); + } + + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } + } + + public void dsRequestDone(DIRRequest rq) { + if (rq.getPinkyRequest().isReady()) { + pinkyStage.sendResponse(rq.getPinkyRequest()); + _stat_numRequests++; + } + } + + private String getStatusPage() throws SQLException, JSONException { + + long time = System.currentTimeMillis(); + + Object[] dbDump = getDBDump(); + + Map> entities = (Map>) dbDump[0]; + Map mappings = (Map) dbDump[1]; + + StringBuilder dump = new StringBuilder(); + dump + .append("
"); + dump + .append(""); + for (String uuid : mappings.keySet()) { + Object[] entry = mappings.get(uuid); + List> mapping = (List>) entry[1]; + + dump.append("
Address Mapping
UUIDmapping
"); + dump.append(uuid); + dump.append(""); + dump.append(""); + dump.append("
"); + for (int i = 0; i < mapping.size(); i++) { + dump.append(""); + } + dump.append("
"); + Map map = mapping.get(i); + String endpoint = map.get("protocol") + "://" + map.get("address") + ":" + + map.get("port"); + dump.append(""); + dump.append(endpoint); + dump.append(""); + dump.append(map.get("match_network")); + dump.append(""); + dump.append(map.get("ttl")); + dump.append("
version: "); + dump.append(entry[2]); + dump.append("
"); + } + dump.append("
"); + + dump + .append("
"); + dump + .append(""); + for (String uuid : entities.keySet()) { + Map entry = entities.get(uuid); + dump.append(""); + } + dump.append("
Data Mapping
UUIDmapping
"); + dump.append(uuid); + dump.append(""); + List keys = new LinkedList(entry.keySet()); + Collections.sort(keys); + for (String key : keys) { + if (key.equals("version")) + continue; + dump.append(""); + } + dump.append("
"); + dump.append(key); + dump.append(""); + dump.append(entry.get(key)); + if (key.equals("lastUpdated")) { + dump.append(" ("); + dump.append(new Date(Long.parseLong(entry.get(key)) * 1000)); + dump.append(")"); + } else if (key.equals("free") || key.equals("total") || key.endsWith("RAM")) { + dump.append(" bytes ("); + dump.append(OutputUtils.formatBytes(Long.parseLong(entry.get(key)))); + dump.append(")"); + } else if (key.equals("load")) { + dump.append("%"); + } + dump.append("
version: "); + dump.append(entry.get("version")); + dump.append("
"); + + String tmp = null; + try { + tmp = statusPageTemplate.replace(Vars.AVAILPROCS.toString(), Runtime.getRuntime() + .availableProcessors() + + " bytes"); + } catch (Exception e) { + tmp = statusPageTemplate; + } + tmp = tmp.replace(Vars.FREEMEM.toString(), Runtime.getRuntime().freeMemory() + " bytes"); + tmp = tmp.replace(Vars.MAXMEM.toString(), Runtime.getRuntime().maxMemory() + " bytes"); + tmp = tmp.replace(Vars.BPSTATS.toString(), BufferPool.getStatus()); + tmp = tmp.replace(Vars.PORT.toString(), Integer.toString(config.getPort())); + tmp = tmp.replace(Vars.DEBUG.toString(), Integer.toString(config.getDebugLevel())); + tmp = tmp.replace(Vars.NUMCON.toString(), Integer.toString(pinkyStage.getNumConnections())); + tmp = tmp.replace(Vars.PINKYQ.toString(), Integer.toString(pinkyStage.getTotalQLength())); + tmp = tmp.replace(Vars.NUMREQS.toString(), Integer.toString(_stat_numRequests)); + tmp = tmp.replace(Vars.TIME.toString(), new Date(time).toString() + " (" + time + ")"); + tmp = tmp.replace(Vars.TABLEDUMP.toString(), dump.toString()); + + return tmp; + + } + + public void crashPerformed() { + try { + shutdown(); + } catch (Exception e) { + Logging.logMessage(Logging.LEVEL_ERROR, this, e); + } + } + + public void shutdownPerformed() { + // ignore + } + + public void startupPerformed() { + // ignore + } + + private Object[] getDBDump() throws SQLException, JSONException { + return dirServiceStage.getDBDump(); + } + +} diff --git a/servers/src/org/xtreemfs/dir/RequestDetails.java b/servers/src/org/xtreemfs/dir/RequestDetails.java new file mode 100644 index 0000000000000000000000000000000000000000..138cf0c1b27fd8f6e63388b29c408d1362e13b76 --- /dev/null +++ b/servers/src/org/xtreemfs/dir/RequestDetails.java @@ -0,0 +1,53 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. 
+ The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB), Jan Stender (ZIB) + */ +package org.xtreemfs.dir; + +import java.util.Map; + +/** + * + * 29.09.2008 + * + * @author clorenz + */ +public final class RequestDetails { + public String userId; + + public boolean superUser; + + public boolean authenticated; + + public boolean authorized; + + public Map context; + + /** + * + */ + public RequestDetails() { + userId = null; + authenticated = false; + authorized = false; + } +} diff --git a/servers/src/org/xtreemfs/dir/SQLQueryHelper.java b/servers/src/org/xtreemfs/dir/SQLQueryHelper.java new file mode 100644 index 0000000000000000000000000000000000000000..881b4fb800d66f6ad91e3743656a53a7308a0871 --- /dev/null +++ b/servers/src/org/xtreemfs/dir/SQLQueryHelper.java @@ -0,0 +1,163 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.dir; + + +public class SQLQueryHelper { + + public static String createInsertStatement(String tableName, + String[] columnNames, Object[] values) { + + StringBuffer sb = new StringBuffer(); + sb.append("INSERT INTO "); + sb.append(tableName); + + if (columnNames != null) { + sb.append(" ("); + sb.append(arrayToString(columnNames)); + sb.append(")"); + } + + sb.append(" VALUES ("); + sb.append(valArrayToString(values)); + sb.append(")"); + + return sb.toString(); + } + + public static String createDeleteStatement(String tableName, + String whereStatement) { + StringBuffer sb = new StringBuffer(); + sb.append("DELETE FROM "); + sb.append(tableName); + sb.append(" WHERE "); + sb.append(whereStatement); + + return sb.toString(); + } + + public static String createQueryStatement(String tableName, + String[] columnNames, String whereStatement) { + + StringBuffer sb = new StringBuffer(); + sb.append("SELECT "); + + if (columnNames != null) + sb.append(arrayToString(columnNames)); + else + sb.append("*"); + + sb.append(" FROM "); + sb.append(tableName); + + if (whereStatement != null && whereStatement.length() > 0) { + sb.append(" WHERE "); + sb.append(whereStatement); + } + + return sb.toString(); + } + + public static String createQueryStatement(String[] tableNames, + String[] columnNames, String whereStatement) { + + 
return createQueryStatement(arrayToString(tableNames), columnNames, + whereStatement); + } + + public static String createLeftJoinQueryStatement(String[] tableNames, + String[] colIds, String[] leftJoinTableNames, String onStatement, + String whereStatement) { + + StringBuffer sb = new StringBuffer(); + sb.append("SELECT DISTINCT "); + sb.append(colIds == null ? "*" : arrayToString(colIds)); + sb.append(" FROM "); + sb.append(arrayToString(tableNames, " JOIN ")); + sb.append(" LEFT JOIN "); + sb.append(arrayToString(leftJoinTableNames)); + sb.append(" ON "); + sb.append(onStatement); + if (whereStatement != null) { + sb.append(" WHERE "); + sb.append(whereStatement); + } + + return sb.toString(); + } + + public static String createUpdateStatement(String tableName, + String[] columnNames, Object[] values, String whereStatement) { + + StringBuffer sb = new StringBuffer(); + sb.append("UPDATE "); + sb.append(tableName); + sb.append(" SET "); + + for (int i = 0; i < columnNames.length; i++) { + if (i > 0) + sb.append(", "); + sb.append(columnNames[i]); + sb.append(" = "); + sb.append(values[i] instanceof String ? "\"" + values[i] + "\"" + : values[i]); + } + + if (whereStatement != null) { + sb.append(" WHERE "); + sb.append(whereStatement); + } + + return sb.toString(); + } + + private static String arrayToString(String[] array, String separator) { + StringBuffer sb = new StringBuffer(); + for (int i = 0; i < array.length; i++) { + sb.append(array[i]); + if (i < array.length - 1) + sb.append(separator); + } + + return sb.toString(); + } + + private static String arrayToString(String[] array) { + return arrayToString(array, ", "); + } + + private static String valArrayToString(Object[] vals) { + StringBuffer sb = new StringBuffer(); + for (int i = 0; i < vals.length; i++) { + sb.append(vals[i] instanceof String ? 
"'" + vals[i] + "'" + : vals[i]); + if (i < vals.length - 1) + sb.append(", "); + } + + return sb.toString(); + } + +} diff --git a/servers/src/org/xtreemfs/dir/UserException.java b/servers/src/org/xtreemfs/dir/UserException.java new file mode 100644 index 0000000000000000000000000000000000000000..acbb9ec4a3e2acabe21adcef51329848832d3b82 --- /dev/null +++ b/servers/src/org/xtreemfs/dir/UserException.java @@ -0,0 +1,59 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.dir; + +/** + * This exception is thrown if something + * + * @author bjko, stender + */ +public class UserException extends java.lang.Exception { + + private int errno = 0; + + /** + * Creates a new instance of XtreemFSException without detail + * message. 
+ */ + public UserException() { + } + + public UserException(int errno) { + this.errno = errno; + } + + public UserException(String message) { + super(message); + } + + public UserException(int errno, String message) { + super(message + " (errno=" + errno + ")"); + this.errno = errno; + } + + public int getErrno() { + return this.errno; + } +} diff --git a/servers/src/org/xtreemfs/dir/templates/status.html b/servers/src/org/xtreemfs/dir/templates/status.html new file mode 100644 index 0000000000000000000000000000000000000000..7571fd03e1932078628db35b334bf057473e20e1 --- /dev/null +++ b/servers/src/org/xtreemfs/dir/templates/status.html @@ -0,0 +1,111 @@ + + + XtreemFS Directory Service + + + + +

DIR

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Configuration +
TCP port
Debug Level
+ Load +
# HTTP connections (pinky)
HTTP server (pinky) queue length
+ Transfer +
# requests processed
+ VM Info / Memory +
Buffer Pool stats
+ Time +
global XtreemFS time
+ Database Dump +
+ + \ No newline at end of file diff --git a/servers/src/org/xtreemfs/foundation/LifeCycleListener.java b/servers/src/org/xtreemfs/foundation/LifeCycleListener.java new file mode 100644 index 0000000000000000000000000000000000000000..1aee1b5f75eb92f8f7a5ceab0a2b8d354928550b --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/LifeCycleListener.java @@ -0,0 +1,40 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB) + */ +package org.xtreemfs.foundation; + +/** + * Notifies a process of a life cycle event. 
+ * + * @author stender + * + */ +public interface LifeCycleListener { + + public void startupPerformed(); + + public void shutdownPerformed(); + + public void crashPerformed(); + +} diff --git a/servers/src/org/xtreemfs/foundation/LifeCycleThread.java b/servers/src/org/xtreemfs/foundation/LifeCycleThread.java new file mode 100644 index 0000000000000000000000000000000000000000..7c3c4262f9150a72c3dcc05cc9a6dcd99414444d --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/LifeCycleThread.java @@ -0,0 +1,150 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.foundation; + +/** + * A base class for threads representing a life cycle. It offers methods for + * blocking other threads until a certain life cycle event has occured. It + * currently supports two life cycle-related events: startup and shutdown. 
+ * + * @author stender + * + */ +public class LifeCycleThread extends Thread { + + private final Object startLock; + + private final Object stopLock; + + private boolean started; + + private boolean stopped; + + private Exception exc; + + private LifeCycleListener listener; + + public LifeCycleThread(String name) { + super(name); + startLock = new Object(); + stopLock = new Object(); + } + + /** + * This method should be invoked by subclasses when the startup procedure + * has been completed. + */ + protected void notifyStarted() { + synchronized (startLock) { + started = true; + startLock.notifyAll(); + if (listener != null) + listener.startupPerformed(); + } + } + + /** + * This method should be invoked by subclasses when the shutdown procedure + * has been completed. + */ + protected void notifyStopped() { + synchronized (stopLock) { + stopped = true; + stopLock.notifyAll(); + if (listener != null) + listener.shutdownPerformed(); + } + } + + /** + * This method should be invoked by subclasses when the thread has crashed. + */ + protected void notifyCrashed(Exception exc) { + + synchronized (startLock) { + this.exc = exc; + started = true; + startLock.notifyAll(); + } + + synchronized (stopLock) { + this.exc = exc; + stopped = true; + stopLock.notifyAll(); + } + + if (listener != null) + listener.crashPerformed(); + } + + /** + * Synchronously waits for a notification indicating that the startup + * procedure has been completed. + * + * @throws Exception + * if an error occured during the startup procedure + */ + public void waitForStartup() throws Exception { + synchronized (startLock) { + + while (!started) + startLock.wait(); + + if (exc != null) + throw exc; + } + } + + /** + * Synchronously waits for a notification indicating that the shutdown + * procedure has been completed. 
+ * + * @throws Exception + * if an error occured during the shutdown procedure + */ + public void waitForShutdown() throws Exception { + synchronized (stopLock) { + + if (!started) + return; + while (!stopped) + stopLock.wait(); + + if (exc != null) + throw exc; + } + } + + /** + * Sets a listener waiting for life cycle events. + * + * @param listener + * the listener + */ + public void setLifeCycleListener(LifeCycleListener listener) { + this.listener = listener; + } + +} diff --git a/servers/src/org/xtreemfs/foundation/json/JSONCharBufferString.java b/servers/src/org/xtreemfs/foundation/json/JSONCharBufferString.java new file mode 100644 index 0000000000000000000000000000000000000000..e21aecf404d644d5d314742847f095c3e07bacbf --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/json/JSONCharBufferString.java @@ -0,0 +1,76 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Jesús Malo (BSC) + */ + +package org.xtreemfs.foundation.json; + +import java.nio.BufferUnderflowException; +import java.nio.CharBuffer; + +/** + * + * @author bjko + */ +public class JSONCharBufferString implements JSONInput { + + CharBuffer cb; + + /** Creates a new instance of JSONCharBufferString */ + public JSONCharBufferString(CharBuffer cb) { + assert (cb != null); + + this.cb = cb; + this.cb.position(0); + } + + public char read() throws JSONException { + try { + return cb.get(); + } catch(BufferUnderflowException ex) { + throw new JSONException("Reached end of buffer"); + } + } + + public int skip(int skip) { + try { + + cb.position(cb.position()+skip); + + return skip; + + } catch (IllegalArgumentException e) { + return 0; + } + + } + + public String toString() { + return "JSONCharBufferString backed by "+cb.toString(); + } + + public boolean hasMore() { + return cb.hasRemaining(); + } + +} diff --git a/servers/src/org/xtreemfs/foundation/json/JSONException.java b/servers/src/org/xtreemfs/foundation/json/JSONException.java new file mode 100644 index 0000000000000000000000000000000000000000..7efadc43873ca9b30e0ce8cabdb2970fda6a398e --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/json/JSONException.java @@ -0,0 +1,52 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Jesús Malo (BSC) + */ + +package org.xtreemfs.foundation.json; + +/** + * Thrown by the JSON parser and writer. + * + * @author bjko + */ +public class JSONException extends java.lang.Exception { + + /** + * Creates a new instance of JSONException without detail + * message. + */ + public JSONException() { + } + + /** + * Constructs an instance of JSONException with the specified + * detail message. + * + * @param msg + * the detail message. + */ + public JSONException(String msg) { + super(msg); + } +} diff --git a/servers/src/org/xtreemfs/foundation/json/JSONInput.java b/servers/src/org/xtreemfs/foundation/json/JSONInput.java new file mode 100644 index 0000000000000000000000000000000000000000..9e96509c8d489811ac11f19bcd1fe6304f77caa1 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/json/JSONInput.java @@ -0,0 +1,63 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Jesús Malo (BSC) + */ + +package org.xtreemfs.foundation.json; + +/** + * + * @author bjko + */ +public interface JSONInput { + + + /** + * reads a single char + * + * @return the character at the current position is returned + */ + public char read() throws JSONException; + + /** + * It checks if there are more characters + */ + public boolean hasMore(); + + /** + * Skips skip characters + * + * @param skip + * num characters to skip + * @return the number of characters skipped + */ + public int skip(int skip); + + /** + * Get a string representation. + * + * @return A string representation of the this JSONString. + */ + public String toString(); + +} diff --git a/servers/src/org/xtreemfs/foundation/json/JSONParser.java b/servers/src/org/xtreemfs/foundation/json/JSONParser.java new file mode 100644 index 0000000000000000000000000000000000000000..7e1fe23f88f6738df78e603666cfceb855c037fa --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/json/JSONParser.java @@ -0,0 +1,344 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Jesús Malo (BSC) + */ + +package org.xtreemfs.foundation.json; + +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * JSON Parser routines. This parser accepts any value as top level element, not + * just an object or array. 
+ * + * @author bjko + */ +public class JSONParser { + + /** + * Creates a new instance of JSONParser + */ + public JSONParser() { + } + + private static String parseString(JSONInput input) throws JSONException { + + boolean nonEscaped = true; + StringBuilder str = new StringBuilder(); + + while (input.hasMore()) { + char ch = input.read(); + + if (nonEscaped) { + if (ch == '\\') { + nonEscaped = false; + continue; + } + else if (ch == '"') { + return str.toString(); + } + else { + str.append(ch); + } + } + else { + if (ch == 'n') { + str.append('\n'); + } else if (ch == 'r') { + str.append('\r'); + } else if (ch == 't') { + str.append('\t'); + } else { + str.append(ch); + } + } + + nonEscaped = true; + } + + throw new JSONException("[ E | JSONParser ] Unexpected end while parsing string"); + + } + + private static Object parseNumber(JSONInput input) throws JSONException { + + StringBuilder str = new StringBuilder(); + input.skip(-1); + + boolean isFP = false; + + while (input.hasMore()) { + char ch = input.read(); + + if ((ch == '-') || (ch >= '0') && (ch <= '9')) { + str.append(ch); + } else if ((ch == '.') || (ch == 'E') || (ch == 'e')) { + str.append(ch); + isFP = true; + } else { + input.skip(-1); + if (isFP) + return new BigDecimal(str.toString()); + else + return Long.valueOf(str.toString()); + } + } + + if (isFP) + return new BigDecimal(str.toString()); + else + return Long.valueOf(str.toString()); + } + + private static Object parseArray(JSONInput input) throws JSONException { + LinkedList arr = new LinkedList(); + + while (input.hasMore()) { + char ch = input.read(); + + if (ch == ']') { + return arr; + } else if (ch == ',') { + arr.add(parseJSON(input)); + } else if ((ch == ' ') || (ch == '\t')) { + continue; + } else { + input.skip(-1); + arr.add(parseJSON(input)); + } + } + + throw new JSONException("[ E | JSONParser ] Unexpected end while parsing array"); + } + + private static Object parseObject(JSONInput input) throws JSONException { + + HashMap map 
= new HashMap(); + while (input.hasMore()) { + char ch = input.read(); + + if (ch == '}') { + return map; + } + // skip all ws + if ((ch == ' ') || (ch == '\t')) { + continue; + } + + String name = parseString(input); + ch = input.read(); + + while ((ch == ' ') || (ch == '\t')) { + ch = input.read(); + } + + if (ch != ':') { + throw new JSONException("[ E | JSONParser ] Unexpected token '" + + ((char) ch) + "' or EOF. Expected : in Object."); + } + + while ((ch == ' ') || (ch == '\t')) { + ch = input.read(); + } + + Object value = parseJSON(input); + map.put(name, value); + ch = input.read(); + + while ((ch == ' ') || (ch == '\t')) { + ch = input.read(); + } + + if (ch == '}') { + return map; + } + if (ch != ',') { + throw new JSONException("[ E | JSONParser ] Unexpected token '" + + ((char) ch) + "' or EOF. Expected , or } in Object."); + } + } + + throw new JSONException("[ E | JSONParser ] Unexpected end while parsing object"); + } + + /** + * Parses a JSON message. + * + * @return the objects encoded in input. + * @attention This routine may cause a StackOverflow exception when parsing + * incorrect, very deep or maliciously malformed JSON messages. 
+ * @param input + * the JSON string + * @throws org.xtreemos.wp34.mrc.utils.JSONException + * if input is not valid JSON + */ + public static Object parseJSON(JSONInput input) throws JSONException { + + while (input.hasMore()) { + char ch = input.read(); + + if (ch == '[') { + return parseArray(input); + } else if (ch == '{') { + return parseObject(input); + } else if (ch == '"') { + return parseString(input); + } else if ((ch == '-') || ((ch >= '0') && (ch <= '9'))) { + return parseNumber(input); + } else if (ch == 't') { + input.skip(3); + return Boolean.valueOf(true); + } else if (ch == 'f') { + input.skip(4); + return Boolean.valueOf(false); + } else if (ch == 'n') { + input.skip(3); + return null; + } else if ((ch == ' ') || (ch == '\t')) { + continue; + } else { + throw new JSONException("[ E | JSONParser ] Unexpected token '" + + ((char) ch) + "' expected Object, Array or Value."); + } + } + + throw new JSONException("[ E | JSONParser ] Unexpected end while parsing root element"); + } + + /** + * Creates a JSON encoded message from an object. Can handle Boolean, + * Integer, Long, BigDecimal, List and Map. + * + * @param input + * object to encode, objects can be nested. + * @return a JSON encoded message + * @throws org.xtreemos.wp34.mrc.utils.JSONException + * if there are one or more objects it cannot encode + */ + public static String writeJSON(Object input) throws JSONException { + return writeJSON(input,new StringBuilder()).toString(); + } + + /** + * Creates a JSON encoded message from an object. Can handle Boolean, + * Integer, Long, BigDecimal, List and Map. + * + * @param input + * object to encode, objects can be nested. 
+ * @return a JSON encoded message + * @throws org.xtreemos.wp34.mrc.utils.JSONException + * if there are one or more objects it cannot encode + */ + public static StringBuilder writeJSON(Object input, StringBuilder result) throws JSONException { + + if (input == null) { + return result.append("null"); + } else if (input instanceof Boolean) { + return result.append(((Boolean) input).booleanValue() ? "true" : "false"); + } else if (input instanceof BigDecimal) { + return result.append(((BigDecimal) input).toString()); + } else if (input instanceof Integer) { + return result.append((Integer) input); + } else if (input instanceof Long) { + return result.append((Long) input); + } else if (input instanceof String) { + return writeJSONString(input,result); + } else if (input instanceof Map) { + return writeJSONObject(input,result); + } else if (input instanceof Collection) { + return writeJSONArray(input,result); + } else { + throw new JSONException( + "[ E | JSONParser ] Unexpected Object type: " + + input.getClass().getName()); + } + } + + private static StringBuilder writeJSONObject(Object input, StringBuilder result) throws JSONException { + Map map = (Map) input; + result.append("{"); + int i = 1; + for (Object key : map.keySet()) { + writeJSONString(key.toString(),result); + result.append(":"); + writeJSON(map.get(key),result); + if (i < map.size()) + result.append(","); + i++; + } + return result.append("}"); + } + + private static StringBuilder writeJSONArray(Object input, StringBuilder result) throws JSONException { + Collection arr = (Collection) input; + result.append("["); + int i = 1; + for (Object obj : arr) { + writeJSON(obj,result); + if (i < arr.size()) + result.append(","); + i++; + } + return result.append("]"); + } + + private static StringBuilder writeJSONString(Object input, StringBuilder result) { + /* + * This is 10 times faster than using str.replace + */ + final String str = input.toString(); + result.append("\""); + for (int i = 0; i < 
str.length(); i++) { + char ch = str.charAt(i); + + switch (ch) { + case '\n' : result.append("\\n"); break; + case '\r' : result.append("\\r"); break; + case '\t' : result.append("\\t"); break; + case '"' : result.append("\\\""); break; + case '\\' : result.append("\\\\"); break; + case '/' : result.append("\\/"); break; + default: result.append(ch); + } + } + result.append("\""); + return result; + } + + + public static String toJSON(Object... args) throws JSONException { + List argList = new ArrayList(args.length); + for (Object arg : args) + argList.add(arg); + + return writeJSON(argList); + } + +} diff --git a/servers/src/org/xtreemfs/foundation/json/JSONString.java b/servers/src/org/xtreemfs/foundation/json/JSONString.java new file mode 100644 index 0000000000000000000000000000000000000000..8006e2ef0d7de8a18398875bd74db5bd1e11e297 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/json/JSONString.java @@ -0,0 +1,126 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Jesús Malo (BSC) + */ + +package org.xtreemfs.foundation.json; + +/** + * This class is necessary because the StringReader cannot skip back once the + * end is reached. + * + * @author bjko + */ +public class JSONString implements JSONInput { + + String str; + + int position; + + /** + * Creates a new instance of JSONString + * + * @param str + * the JSON message + */ + public JSONString(String str) { + this.str = str; + position = 0; + } + + /** + * reads a single char + * + * @return the character at the current position is returned + */ + public char read() throws JSONException { + try { + return str.charAt(position++); + } + catch (StringIndexOutOfBoundsException ex) { + throw new JSONException("Reach the end of the string"); + } + } + + /** + */ + public boolean hasMore() { + return position < str.length(); + } + + + /** + * Skips skip characters + * + * @param skip + * num characters to skip + * @return the number of characters skipped + */ + public int skip(int skip) { + if (((position + skip) < 0) || ((position + skip) >= str.length())) { + return 0; + } else { + position = position + skip; + return skip; + } + } + + /** + * Get a string representation. + * + * @return A string representation of the this JSONString. 
+ */ + public String toString() { + return "JSONString pos=" + position + " str=" + str; + } + + /** + * @author Jesús Malo (jmalo) + */ + public boolean equals(Object obj) { + if(this == obj) return true; + if((obj == null) || (obj.getClass() != this.getClass())) return false; + + JSONString other = (JSONString) obj; + + String Iam = toString(); + String ItIs = other.toString(); + + return Iam.equals(ItIs); + } + + /** + * @author Jesús Malo (jmalo) + */ + public int hashCode() { + return toString().hashCode(); + } + + /** It provides the String of the JSONString + * @author Jesús Malo (jmalo) + * @return The JSONString of the object + */ + public String asString() { + return str; + } +} diff --git a/servers/src/org/xtreemfs/foundation/pinky/ConnectionRemover.java b/servers/src/org/xtreemfs/foundation/pinky/ConnectionRemover.java new file mode 100644 index 0000000000000000000000000000000000000000..386360e2639eec5bd1295af48416c7ff2d243b69 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/pinky/ConnectionRemover.java @@ -0,0 +1,112 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.foundation.pinky; + +import java.io.IOException; +import java.nio.channels.Selector; +import java.util.Iterator; +import java.util.Queue; +import java.util.concurrent.atomic.AtomicInteger; +import org.xtreemfs.common.logging.Logging; + +/** + * Removes lingering connections. + * + * @author bjko + */ +public class ConnectionRemover extends Thread { + + private final Queue connections; + + private final AtomicInteger counter; + + int cleanupInterval; + + boolean quit; + + private final Selector selector; + + /** + * Creates a new instance of ConnectionRemover + * + * @param connections + * the list of all active connections + * @param interval + * time between runs + */ + public ConnectionRemover(Queue connections, AtomicInteger counter, Selector selector, int interval) { + super("Pinky Connection Remover"); + this.connections = connections; + this.cleanupInterval = interval; + this.quit = false; + this.counter = counter; + this.selector = selector; + } + + /** + * Shuts the thread down. + */ + public void quitThread() { + this.quit = true; + this.interrupt(); + } + + /** + * Main loop. 
+ */ + public void run() { + while (!quit) { + Iterator conIt = connections.iterator(); + while (conIt.hasNext()) { + ConnectionState con = conIt.next(); + if (!con.active.get() && (con.pipeline.isEmpty())) { + if (con.channel.isOpen()) { + try { + if (con.channel.shutdown(con.channel.keyFor(selector))) { + counter.decrementAndGet(); + con.channel.close(); + con.active.set(false); + con.freeBuffers(); + //con.requestHeaders = null; + Logging.logMessage(Logging.LEVEL_DEBUG, this, "connection to " + con.channel.socket().getRemoteSocketAddress() + " closed"); + + conIt.remove(); + } + } catch (IOException e) { + Logging.logMessage(Logging.LEVEL_ERROR, this, e); + } + } + } else { + con.active.set(false); + } + } + try { + this.sleep(cleanupInterval); + } catch (InterruptedException ex) { + } + } + Logging.logMessage(Logging.LEVEL_DEBUG, this, "shutdown complete"); + } +} diff --git a/servers/src/org/xtreemfs/foundation/pinky/ConnectionState.java b/servers/src/org/xtreemfs/foundation/pinky/ConnectionState.java new file mode 100644 index 0000000000000000000000000000000000000000..24270bd8dbe70c77bb99b4169eb6da035dc475af --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/pinky/ConnectionState.java @@ -0,0 +1,408 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.foundation.pinky; + +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.pinky.channels.ChannelIO; + +/** + * ontains a buffer and an active marker for each connection. Includes also the + * parsing routine and state machine state. + * + * @author bjko + */ +public class ConnectionState { + + /** + * Size of input buffer + */ + public static final int BUFFSIZE = 1024 * 64; + + + /** + * Maximum body size to accept. + */ + public static final int MAX_BODY_SIZE = 1024 * 1024 * 10; + + /** + * Maximum header size + */ + public static final int MAX_HDR_SIZE = 4096; + + /** + * Initial Size of buffer for headers + */ + public static final int INIT_HDR_BUF = 128; + + /** + * Buffer holding the incomming data + */ + public final ReusableBuffer data; + + /** + * The channel associated w/ this connection + */ + public final ChannelIO channel; + + /** + * will be set to false by a periodical clean up task. Inactive connections + * (timed out) will be closed and removed. + */ + public final AtomicBoolean active; + + /** + * Buffer for receiving the headers. 
+ */ + public StringBuilder requestHeaders; + + /** + * The data of the request that is currently being parsed + * may be incomplete while parsing + */ + private PinkyRequest currentRq; + + /** + * The request that should be sent to the client. + */ + public PinkyRequest toSend; + + public long remainingBytes = 0; + + public ByteBuffer sendData[]; + + /** + * Status of the parser state machine if connection is idle + */ + public static final int STATUS_IDLE = 0; + + /** + * Status of the parser state machine if reading the headers + */ + public static final int STATUS_READ_HEADERS = 1; + + /** + * Status of the parser state machine after receiving a CRLF + */ + public static final int STATUS_CRLF = 5; + + /** + * Status of the parser state machine if request was parsed + */ + public static final int STATUS_PARSE_REQUEST = 2; + + /** + * Status of the parser state machine while reading the body + */ + public static final int STATUS_READ_BODY = 3; + + /** + * Current status of the request parser state machine + */ + private int status; + + /** + * The request pipeline for that connection. + */ + public final List pipeline; + + private boolean closed; + + private int length; + + /** + * set to a non-null value when the channel has been authenticated with a + * HTTP Digest Authentication + */ + public String userName; + + /** + * used to store the nounce used in HTTP digest authentication + */ + public String httpDigestNounce; + + + + + /** + * Creates a new instance of ConnectionStatus + * + * @param channel + * the channel to which this state object belongs. + */ + public ConnectionState(ChannelIO channel) { + + closed = false; + + active = new AtomicBoolean(true); + + this.channel = channel; + + //data = ByteBuffer.allocateDirect(BUFFSIZE); + data = BufferPool.allocate(BUFFSIZE); + + this.status = STATUS_IDLE; + + toSend = null; + + pipeline = new LinkedList(); + + } + + /** + * This is the main parsing method. It parses the available data in the + * buffer. 
+ * + * @return a list of requests or null if there is no request available + */ + public PinkyRequest processBuffer() { + + // loop until data is empty + while (data.hasRemaining()) { + + switch (this.status) { + case STATUS_IDLE: { + // prepare request + this.requestHeaders = new StringBuilder(INIT_HDR_BUF); + this.status = STATUS_READ_HEADERS; + this.currentRq = new PinkyRequest(); + // TRANSITION + break; + } + + case STATUS_READ_HEADERS: { + char ch = (char) (data.get() & 0xFF); + if (ch == '\n') { + // TRANSITION + this.requestHeaders.append(ch); + this.status = STATUS_CRLF; + } else if (ch != '\r') { + // ignore \r s + this.requestHeaders.append(ch); + // check for overflows... + if (this.requestHeaders.length() >= MAX_HDR_SIZE) { + PinkyRequest rq = new PinkyRequest(HTTPUtils.SC_BAD_REQUEST); + rq.setResponse(HTTPUtils.SC_BAD_REQUEST); + rq.register(this); + rq.setClose(true); + rq.responseSet = true; + Logging.logMessage(Logging.LEVEL_DEBUG,this,"Bad Request: Close channel for " + + this.channel.socket() + .getRemoteSocketAddress()+", max header size exceeded"); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "header parsed so far: " + requestHeaders); + this.data.limit(0); + return rq; + } + } + break; + } + + case STATUS_CRLF: { + char ch = (char) (data.get() & 0xFF); + if (ch == '\r') { + // IGNORE \r + continue; + } + if (ch != '\n') { + // TRANSITION + this.requestHeaders.append(ch); + this.status = STATUS_READ_HEADERS; + continue; + } + // if a second \n comes, headers are done + // TRANSITION + this.status = STATUS_PARSE_REQUEST; + //no break here! fallthroug is required for requests w/ + //empty body. 
+ } + + case STATUS_PARSE_REQUEST: { + // if there is a content length field, try to read the body + int nextNL = this.requestHeaders.indexOf("\n"); + int cPos = 0; + String ftLine = null; + //int length = 0; + + while (nextNL != -1) { + + String line = this.requestHeaders.substring(cPos, nextNL); + cPos = nextNL + 1; + nextNL = this.requestHeaders.indexOf("\n", cPos); + + if (ftLine == null) + ftLine = line; + else { + this.currentRq.requestHeaders.addHeader(line); + } + } + + //check some important headers + String cLength = this.currentRq.requestHeaders.getHeader(HTTPHeaders.HDR_CONTENT_LENGTH); + if (cLength != null) { + try { + length = Integer.valueOf(cLength); + if (length > MAX_BODY_SIZE) + throw new RuntimeException("Too Long"); + } catch (Exception ex) { + // no transition because con is closed anyway... + PinkyRequest rq = new PinkyRequest( + HTTPUtils.SC_BAD_REQUEST); + rq.setResponse(HTTPUtils.SC_BAD_REQUEST); + rq.register(this); + rq.setClose(true); + rq.responseSet = true; + Logging.logMessage(Logging.LEVEL_DEBUG,this,"Bad Request: Close channel for " + + this.channel.socket().getRemoteSocketAddress()+", Content-Length no integer: "+cLength+"/"+ex); + this.data.limit(0); + return rq; + } + } else { + //no body + length = 0; + } + + String conClose = this.currentRq.requestHeaders.getHeader("Connection"); + if (conClose != null) { + if (conClose.equalsIgnoreCase("close")) { + this.currentRq.closeConnection = true; + } + } + + if (ftLine == null) { + // this is an empty request...ignore it + // TRANSITION + this.status = STATUS_IDLE; + continue; + } + + //parse request line + int firstSpace = ftLine.indexOf(' '); + int lastSpace = ftLine.lastIndexOf(' '); + if ((firstSpace == -1) || (firstSpace == lastSpace)) { + PinkyRequest rq = new PinkyRequest( + HTTPUtils.SC_BAD_REQUEST); + rq.setResponse(HTTPUtils.SC_BAD_REQUEST); + rq.register(this); + rq.setClose(true); + rq.responseSet = true; + Logging.logMessage(Logging.LEVEL_DEBUG,this,"Bad Request 
(malformed request line): Close channel for " + + this.channel.socket().getRemoteSocketAddress()+", not a valid request line: "+ftLine); + this.data.limit(0); + return rq; + } else { + this.currentRq.requestMethod = ftLine.substring(0, firstSpace); + this.currentRq.requestURI = ftLine.substring(firstSpace+1, lastSpace); + } + //IGNORE the HTTP/1.1 for now + + if ( !this.currentRq.requestMethod.equals("GET") && + !this.currentRq.requestMethod.equals("PUT") && + !this.currentRq.requestMethod.equals("POST") && + !this.currentRq.requestMethod.equals("DELETE") && + !this.currentRq.requestMethod.equals("HEAD") ) { + //bad request + // flush the entire buffer! + // no transition because con is closed anyway... + PinkyRequest rq = new PinkyRequest(HTTPUtils.SC_NOT_IMPLEMENTED); + rq.setResponse(HTTPUtils.SC_NOT_IMPLEMENTED); + rq.register(this); + rq.setClose(true); + rq.responseSet = true; + Logging.logMessage(Logging.LEVEL_DEBUG,this,"Not Implemented: Close channel for " + + this.channel.socket() + .getRemoteSocketAddress()+" line is "+ftLine); + this.data.limit(0); + return rq; + } + + if (length > 0) { + // TRANISTION + this.status = STATUS_READ_BODY; + this.currentRq.requestBody = BufferPool.allocate(length); + } else { + // TRANSITION + this.status = STATUS_IDLE; + //String hdrs = this.requestHeaders.toString(); + //set to null to allow GC + this.requestHeaders = null; + + PinkyRequest tmp = currentRq; + currentRq = null; + tmp.register(this); + return tmp; + } + break; + } + + case STATUS_READ_BODY: { + // we assume the body to be raw data + if (data.remaining() <= this.currentRq.requestBody.remaining()) { + this.currentRq.requestBody.put(data); + } else { + int oldLimit = data.limit(); + data.limit(data.position()+this.currentRq.requestBody.remaining()); + assert(oldLimit > data.limit()); + this.currentRq.requestBody.put(data); + data.limit(oldLimit); + } + if (!this.currentRq.requestBody.hasRemaining()) { + // TRANSITION + this.status = STATUS_IDLE; + //String 
hdrs = this.requestHeaders.toString(); + this.requestHeaders = null; + PinkyRequest rq = currentRq; + currentRq = null; + rq.register(this); + return rq; + } + break; + } + + default : { + Logging.logMessage(Logging.LEVEL_ERROR,this,"Programmatic ERROR!"); + return null; + } + } + } + return null; + } + + public void freeBuffers() { + if (this.closed) + return; + BufferPool.free(this.data); + for (PinkyRequest rq : this.pipeline) { + rq.freeBuffer(); + } + this.closed = true; + } + +} diff --git a/servers/src/org/xtreemfs/foundation/pinky/HTTPHeaders.java b/servers/src/org/xtreemfs/foundation/pinky/HTTPHeaders.java new file mode 100644 index 0000000000000000000000000000000000000000..413cd6ff5680f24db360225f2af8dfdb925c8c58 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/pinky/HTTPHeaders.java @@ -0,0 +1,240 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.foundation.pinky; + +import java.util.ArrayList; +import java.util.Iterator; + +/** + * Simple class to encapsulate HTTP headers. + * This class is not thread safe + * @author bjko + */ +public class HTTPHeaders implements Iterable { + + /** Number of slots for headers to allocate initially + */ + private static final int INIT_NUM_HDRS = 10; + + /** HTTP header for content (body) type + */ + public static final String HDR_CONTENT_TYPE = "Content-Type"; + + /** HTTP header for content (body) length + */ + public static final String HDR_CONTENT_LENGTH = "Content-Length"; + + /** HTTP header for content range + */ + public static final String HDR_CONTENT_RANGE = "Content-Range"; + + /** HTTP header for authorization + */ + public static final String HDR_AUTHORIZATION = "Authorization"; + + /** HTTP header to request authentication from client + */ + public static final String HDR_WWWAUTH = "WWW-Authenticate"; + + /** (non standard) HTTP header for sending replica locations + */ + public static final String HDR_XLOCATIONS = "X-Locations"; + + /** (non standard) HTTP header containing the capability issued by the MRC + */ + public static final String HDR_XCAPABILITY = "X-Capability"; + + /** (non standard) HTTP header for communicating file size updates to the MRC + */ + public static final String HDR_XNEWFILESIZE = "X-New-File-Size"; + + /** (non standard) HTTP header containing the object number + */ + public static final String HDR_XOBJECTNUMBER = "X-Object-Number"; + + public static final String HDR_XVERSIONNUMBER = "X-Version-Number"; + + /** (non standard) HTTP header for sending the target location for a file + * replication request + */ + public static final String HDR_XTARGETLOCATION = "X-Target-Location"; + + public static final String HDR_XINVALIDCHECKSUM = "X-Invalid-Checksum"; + + /** (non standard) HTTP header indicating what locations will be 
excluded + * @deprecated + */ + public static final String HDR_XEXCLUDEDLOCATION = "X-Excluded-Location"; + + /** (non standard) HTTP header indicating a fileID + */ + public static final String HDR_XFILEID = "X-FileID"; + + public static final String HDR_XLEASETO = "X-Lease-Timeout"; + + public static final String HDR_XREQUESTID = "X-Request-Id"; + + + /** HTTP header for location + */ + public static final String HDR_LOCATION = "Location"; + + /** list of headers + */ + private ArrayList hdrs; + + /** + * Creates a new instance of HTTPHeaders + */ + public HTTPHeaders() { + hdrs = new ArrayList(INIT_NUM_HDRS); + } + + /** adds an entry to the header list + */ + public void addHeader(String name, String value) { + hdrs.add(new HeaderEntry(name,value)); + } + + /** adds an entry to the header list + */ + public void addHeader(String name, int value) { + hdrs.add(new HeaderEntry(name,Integer.toString(value))); + } + + /** parses 'headerName: headerValue' strings. + */ + public void addHeader(String line) { + int colonPos = line.indexOf(':'); + if (colonPos > 0) { + + String name = line.substring(0,colonPos).trim(); + String value = line.substring(colonPos+1).trim(); + hdrs.add(new HeaderEntry(name,value)); + } + } + + /** sets a header value or adds that header if it was not in the list before + */ + public void setHeader(String name, String value) { + for (HeaderEntry he : hdrs) { + if (he.name.equalsIgnoreCase(name)) { + he.value = value; + return; + } + } + hdrs.add(new HeaderEntry(name,value)); + } + + /** It gets a header's value + * @param name Header's identifier + * @return The value of the header or null if the header doesn't exist in the object. 
+ */ + public String getHeader(String name) { + for (HeaderEntry he : hdrs) { + if (he.name.equalsIgnoreCase(name)) { + return he.value; + } + } + return null; + } + + /** converts all header in the list into a + * HTTP conform header string + */ + public String toString() { + StringBuilder sb = new StringBuilder(); + this.append(sb); + return sb.toString(); + } + + public void append(StringBuilder sb) { + for (HeaderEntry he : hdrs) { + sb.append(he.name); + sb.append(": "); + sb.append(he.value); + sb.append(HTTPUtils.CRLF); + } + } + + public void parse(String headers) { + int nextNL = headers.indexOf("\n"); + int cPos = 0; + + while (nextNL != -1) { + + String line = headers.substring(cPos, nextNL); + cPos = nextNL + 1; + nextNL = headers.indexOf("\n", cPos); + + this.addHeader(line); + } + } + + public void copyFrom(HTTPHeaders other) { + for (HeaderEntry hdr : other.hdrs) { + this.setHeader(hdr.name, hdr.value); + } + } + + public int getSizeInBytes() { + int numBytes = 0; + for (HeaderEntry he : hdrs) { + numBytes += he.name.length()+2+he.value.length()+HTTPUtils.CRLF.length(); + } + return numBytes; + } + + /** simple class to store header name and value + */ + public static final class HeaderEntry { + public String name; + public String value; + public HeaderEntry(String name, String value) { + this.name = name; + this.value = value; + } + } + + public Iterator iterator() { + return new Iterator() { + + int position = 0; + + public boolean hasNext() { + return position < hdrs.size(); + } + + public HeaderEntry next() { + return hdrs.get(position++); + } + + public void remove() { + throw new UnsupportedOperationException("Not supported yet."); + } + }; + } + +} diff --git a/servers/src/org/xtreemfs/foundation/pinky/HTTPUtils.java b/servers/src/org/xtreemfs/foundation/pinky/HTTPUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..c22d80f3a15abbfb85132a6dd10cb64dc6f5a0ca --- /dev/null +++ 
b/servers/src/org/xtreemfs/foundation/pinky/HTTPUtils.java @@ -0,0 +1,501 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.foundation.pinky; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SocketChannel; +import java.nio.charset.Charset; + +/** + * Utilities for assembling HTTP messages. + * + * @author bjko + */ +public class HTTPUtils { + + /** + * UTF8 encoding, used for JSON messages + */ + public static final Charset ENC_UTF8 = Charset.forName("utf-8"); + + /** + * ASCII encoding, used for headers + */ + public static final Charset ENC_ASCII = Charset.forName("ascii"); + + /** + * HTTP Protocol version. Always + * + *
+     * <pre>
+     * HTTP/1.1
+     * </pre>
+     *
+ */ + public static final String HTTP_VER = "HTTP/1.1"; + + /** + * Space character + */ + public static final char SP = ' '; + + /** + * Newline in HTTP. + */ + public static final String CRLF = "\r\n"; + + /** + * Content type for JSON content. + */ + public static final String JSON_TYPE = "text/plain; charset=UTF-8"; + + /** + * Default encoding for textual data (like JSON). + */ + public static final String JSON_ENCODING = HTTPHeaders.HDR_CONTENT_TYPE + ": " + + JSON_TYPE; + + /** + * Content type for HTML content. + */ + public static final String HTML_TYPE = "text/html; charset=UTF-8"; + + /** + * Default encoding for textual data (like JSON). + */ + public static final String HTML_ENCODING = HTTPHeaders.HDR_CONTENT_TYPE + ": " + + HTML_TYPE; + + /** + * Content type for binary content. + */ + public static final String BIN_TYPE = "application/octet-stream"; + + /** + * Encoding for binary data. + */ + public static final String BIN_ENCODING = HTTPHeaders.HDR_CONTENT_TYPE + ": " + BIN_TYPE; + + /** + * Status code for success. + */ + public static final int SC_OKAY = 200; + + /** + * Status code for see other (redirect) + */ + public static final int SC_SEE_OTHER = 303; + + /** + * Status code for bad requests. + */ + public static final int SC_BAD_REQUEST = 400; + + /** + * Status code for 404. + */ + public static final int SC_NOT_FOUND = 404; + + /** + * Status code returned if a procedure threw a user exception (i.e. not a + * server error). This is NOT standard HTTP/1.1 but custom codes are + * possibel according to the standard. + */ + public static final int SC_USER_EXCEPTION = 420; + + /** + * Status code for internal server errors. + */ + public static final int SC_SERVER_ERROR = 500; + + /** + * Status code for methods not implemented in this server. These are nearly + * all methods ;-) + */ + public static final int SC_NOT_IMPLEMENTED = 501; + + /** + * Status code for service unavailable. 
+ */ + public static final int SC_SERV_UNAVAIL = 503; + + public static final int SC_UNAUTHORIZED = 401; + + /** + * Content type + */ + public static enum DATA_TYPE { + /** + * for binary data + */ + BINARY(BIN_TYPE), + /** + * for JSON messages + */ + JSON(JSON_TYPE), + /** + * for HTML messages + */ + HTML(HTML_TYPE); + + private String name; + DATA_TYPE(String name) { + this.name = name; + } + @Override + public String toString() { + return name; + } + }; + + // Tokens of the HTTP methods + /** + * Token of the GET method + */ + public static final String GET_TOKEN = "GET"; + + /** + * Token of the PUT method + */ + public static final String PUT_TOKEN = "PUT"; + + /** + * Token of the POST method + */ + public static final String POST_TOKEN = "POST"; + + /** + * Token of the DELETE method + */ + public static final String DELETE_TOKEN = "DELETE"; + + /** + * Generate the textual message for a status code. + * + * @param statusCode + * status code + * @return The String representing the status code. + */ + public static String getReasonPhrase(int statusCode) { + switch (statusCode) { + case SC_OKAY: + return "OK"; + case SC_SEE_OTHER: + return "See Other"; + case SC_BAD_REQUEST: + return "Bad Request"; + case SC_NOT_FOUND: + return "Not Found"; + case SC_SERVER_ERROR: + return "Internal Server Error"; + case SC_NOT_IMPLEMENTED: + return "Not Implemented"; + default: + return ""; + } + } + + /** + * Sends a response to a client. + * + * @param conn + * client + * @param statusCode + * code to send + * @param body + * the message body to send to the client + * @param close + * if true the connection will be closed afterwards + * @throws java.io.IOException + * passes all IOException from underlying IO primitives + */ + public static void sendResponse(SocketChannel conn, int statusCode, String body, boolean close) + throws IOException { + sendResponse(conn, statusCode, body.getBytes(ENC_UTF8), close); + } + + /** + * Sends a response to a client. 
+ * + * @param conn + * client + * @param statusCode + * code to send + * @param body + * the message body to send to the client + * @param close + * if true the connection will be closed afterwards + * @throws java.io.IOException + * passes all IOException from underlying IO primitives + */ + public static void sendResponse(SocketChannel conn, int statusCode, byte[] body, boolean close) + throws IOException { + String hdr = HTTP_VER + SP + statusCode + SP + getReasonPhrase(statusCode) + CRLF; + hdr += JSON_ENCODING + CRLF; + + // hdr += "Cache-Control: no-cache"+CRLF; + if (body != null) { + hdr += "Content-Length:" + SP + body.length + CRLF; + + } else + hdr += "Content-Length: 0" + CRLF; + if (close) + hdr += "Connection: close" + CRLF; + + hdr += CRLF; + + // System.out.println("RQCONTENTS "+hdr); + + if (body == null) { + conn.write(ByteBuffer.wrap(hdr.getBytes(ENC_ASCII))); + } else { + conn.write(ByteBuffer.wrap(hdr.getBytes(ENC_ASCII))); + conn.write(ByteBuffer.wrap(body)); + } + } + + /** + * Sends a response to a client. + * + * @param bdyLen + * length of body data + * @param type + * contents + * @param conn + * client + * @param statusCode + * code to send + * @throws java.io.IOException + * passes all IOException from underlying IO primitives + */ + public static void sendHeaders(SocketChannel conn, int statusCode, long bdyLen, DATA_TYPE type) + throws IOException { + String hdr = HTTP_VER + SP + statusCode + SP + getReasonPhrase(statusCode) + CRLF; + switch (type) { + case JSON: + hdr += JSON_ENCODING + CRLF; + break; + case HTML: + hdr += HTML_ENCODING + CRLF; + break; + default: + hdr += BIN_ENCODING + CRLF; + } + + // hdr += "Cache-Control: no-cache"+CRLF; + hdr += "Content-Length:" + SP + bdyLen + CRLF; + + hdr += CRLF; + // System.out.println("RQCONTENTS "+hdr); + conn.write(ByteBuffer.wrap(hdr.getBytes(ENC_ASCII))); + + } + + /** + * Sends a response to a client. 
+ * + * @param bdyLen + * length of data + * @param type + * content type + * @param statusCode + * code to send + * @return the headers to send + */ + public static byte[] getHeaders(int statusCode, long bdyLen, DATA_TYPE type) { + String hdr = HTTP_VER + SP + statusCode + SP + getReasonPhrase(statusCode) + CRLF; + switch (type) { + case JSON: + hdr += JSON_ENCODING + CRLF; + break; + case HTML: + hdr += HTML_ENCODING + CRLF; + break; + default: + hdr += BIN_ENCODING + CRLF; + } + + // hdr += "Cache-Control: no-cache"+CRLF; + hdr += "Content-Length:" + SP + bdyLen + CRLF; + + // hdr += "X-DEBUG-RQID: "+rqID.getAndIncrement()+CRLF; + + hdr += CRLF; + // System.out.println("RQCONTENTS "+hdr); + return hdr.getBytes(ENC_ASCII); + + } + + /** + * Sends a response to a client. + * + * @param bdyLen + * length of data + * @param type + * content type + * @param statusCode + * code to send + * @return the headers to send + */ + public static byte[] getHeaders(int statusCode, long bdyLen, DATA_TYPE type, + String additionalHeaders) { + String hdr = HTTP_VER + SP + statusCode + SP + getReasonPhrase(statusCode) + CRLF; + switch (type) { + case JSON: + hdr += JSON_ENCODING + CRLF; + break; + case HTML: + hdr += HTML_ENCODING + CRLF; + break; + default: + hdr += BIN_ENCODING + CRLF; + } + + // hdr += "Cache-Control: no-cache"+CRLF; + hdr += "Content-Length:" + SP + bdyLen + CRLF; + + // hdr += "X-DEBUG-RQID: "+rqID.getAndIncrement()+CRLF; + + if (additionalHeaders != null) + hdr += additionalHeaders; + + hdr += CRLF; + // System.out.println("RQCONTENTS "+hdr); + return hdr.getBytes(ENC_ASCII); + + } + + /** + * Create a request. + * + * @param method + * HTTP method, + * + *
+     * <pre>
+     * GET
+     * </pre>
+     *
+     * or
+     *
+     * <pre>
+     * PUT
+     * </pre>
+ * + * @param URI + * the uri to request + * @param byteRange + * the range to request, or null + * @param bdyLen + * length of request body + * @param type + * body content type + * @return the headers to send before the body + */ + public static byte[] getRequest(String method, String URI, String byteRange, String authString, + long bdyLen, DATA_TYPE type, HTTPHeaders aHdrs) { + String hdr = method + SP + URI + SP + HTTP_VER + CRLF; + switch (type) { + case JSON: + hdr += JSON_ENCODING + CRLF; + break; + case HTML: + hdr += HTML_ENCODING + CRLF; + break; + default: + hdr += BIN_ENCODING + CRLF; + } + + // hdr += "Cache-Control: no-cache"+CRLF; + // hdr += "Host: localhost"+CRLF; + hdr += HTTPHeaders.HDR_CONTENT_LENGTH + ":" + SP + bdyLen + CRLF; + if (authString != null) + hdr += HTTPHeaders.HDR_AUTHORIZATION + ":" + SP + authString + CRLF; + if (byteRange != null) { + hdr += HTTPHeaders.HDR_CONTENT_RANGE + ":" + SP + byteRange + CRLF; + } + + if (aHdrs != null) + hdr += aHdrs.toString(); + + hdr += CRLF; + // System.out.println("RQCONTENTS "+hdr); + return hdr.getBytes(ENC_ASCII); + + } + + /** + * It creates a request without body. The user must specify the required + * headers. + * + * @param method + * The HTTP method of the request + * @param URI + * URI field of the new request + * @param headers + * Headers of the request + */ + public static byte[] getRequest(String method, String URI, HTTPHeaders headers) { + String hdr = method + SP + URI + SP + HTTP_VER + CRLF; + hdr += headers.toString(); + + hdr += CRLF; + return hdr.getBytes(ENC_ASCII); + } + + /** + * @header header to look for. Must be UPPER CASE! 
+ */ + public static boolean compareHeaderName(String header, String line) { + if (header.length() > line.length()) + return false; + + for (int i = 0; i < header.length(); i++) { + if (header.charAt(i) != Character.toUpperCase(line.charAt(i))) + return false; + } + return true; + } + + public static boolean isContentLength(String line) { + // Content-length + return (line.charAt(0) == 'C' || line.charAt(0) == 'c') + && (line.charAt(6) == 't' || line.charAt(6) == 'T') + && (line.charAt(13) == 'h' || line.charAt(13) == 'H'); + + } + + public static boolean isContentRange(String line) { + return (line.charAt(0) == 'C' || line.charAt(0) == 'c') + && (line.charAt(6) == 't' || line.charAt(6) == 'T') + && (line.charAt(12) == 'e' || line.charAt(12) == 'E'); + + } + +} diff --git a/servers/src/org/xtreemfs/foundation/pinky/PinkyInterface.java b/servers/src/org/xtreemfs/foundation/pinky/PinkyInterface.java new file mode 100644 index 0000000000000000000000000000000000000000..afcbb6ff1cbb3badeef53763c125e22d7bb547a0 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/pinky/PinkyInterface.java @@ -0,0 +1,59 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.foundation.pinky; + +import java.io.IOException; + +/** + * + * @author bjko + */ +public interface PinkyInterface extends Runnable { + + /** + * Sends a response to the client that sent the request. + * + * @param rq + * The request that contains response data. + * @throws java.io.IOException + * if this cannot be sent + */ + public void sendResponse(PinkyRequest rq) throws IOException; + + /** + * Registers a listener for incomming request. + * + * @param rl + * A listener. + * @attention YOU MUST REGISTER A LISTENER BEFORE USING PINKY! + */ + public void registerListener(PinkyRequestListener rl); + + /** + * Gracefully shut down all connections and the servers. + */ + public void shutdown(); + +} diff --git a/servers/src/org/xtreemfs/foundation/pinky/PinkyRequest.java b/servers/src/org/xtreemfs/foundation/pinky/PinkyRequest.java new file mode 100644 index 0000000000000000000000000000000000000000..d3b9a44d25875561c1bb02c48c724215dee5f586 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/pinky/PinkyRequest.java @@ -0,0 +1,488 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.foundation.pinky; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; + +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.checksums.ChecksumFactory; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.OutputUtils; +import org.xtreemfs.foundation.pinky.channels.ChannelIO; + +/** + * Represents a request sent by the client and the response generated. + * + * @author bjko + */ +public class PinkyRequest { + + /** + * the URI the client requested + */ + public String requestURI; + + /** + * the request HTTP method + */ + public String requestMethod; + + /** + * the headers sent by the client + */ + public HTTPHeaders requestHeaders; + + /** + * the body sent by the client + */ + public ReusableBuffer requestBody; + + /** + * request body length (can be smaller than requestBody.capacity() because of buffer reuse) + */ + public int requestBdyLength; + + /** + * if set to true the request is + * requeued after Pinky has sent + * the byte buffer. The requestBuffer is + * not freed after sending! + */ + public boolean streaming; + + /** + * This can be used by the + * handler for streaming requests. This values + * is not modified by Pinky. 
+ */ + public long streamPosition; + + // ----- + + public ReusableBuffer responseHeaders; + + public ReusableBuffer responseBody; + + + int statusCode; + + boolean closeConnection; + + HTTPUtils.DATA_TYPE responseType; + + // ----- + // stuff used by the pinky server + protected ConnectionState client; + + protected volatile boolean ready; + + public boolean responseSet; + + // for debugging only + public int debugRqId = 0; + long _receiveTime = -1; + + /** Creates a new instance of PinkyRequest */ + public PinkyRequest() { + this.requestURI = ""; + this.requestMethod = ""; + this.requestHeaders = new HTTPHeaders(); + this.requestBody = null; + + this.responseHeaders = null; + this.responseBody = null; + this.statusCode = HTTPUtils.SC_OKAY; + this.closeConnection = false; + this.responseType = HTTPUtils.DATA_TYPE.JSON; + + this.client = null; + this.ready = false; + this.streaming = false; + this.streamPosition = 0; + this.responseSet = false; + } + + /** + * creates a new request with just a HTTP status code + * + * @param statusCode + * HTTP status code + */ + public PinkyRequest(int statusCode) { + this(); + this.statusCode = statusCode; + } + + /** + * creates a new request. 
+ * + * @param requestMethod + * HTTP method + * @param requestURI + * uri requested + * @param requestByteRange + * byte raneg requested + * @param requestHeaders + * headers sent by client + */ + public PinkyRequest(String requestMethod, String requestURI, + String requestByteRange, HTTPHeaders requestHeaders) { + this(); + this.requestURI = requestURI; + this.requestMethod = requestMethod; + this.requestHeaders = requestHeaders; + } + + /** + * creates a new request with a body + * + * @param requestMethod + * HTTP method + * @param requestURI + * uri requested + * @param requestByteRange + * byte raneg requested + * @param requestHeaders + * headers sent by client + * @param requestBody + * the body data sent by the client + */ + public PinkyRequest(String requestMethod, String requestURI, + String requestByteRange, HTTPHeaders requestHeaders, + ReusableBuffer requestBody) { + this(); + this.requestURI = requestURI; + this.requestMethod = requestMethod; + this.requestHeaders = requestHeaders; + this.requestBody = requestBody; + } + + /** + * Sets the request URI and the request body. + * + * WARNING: The method also frees the former request body! All references to + * the former request body become invalid. 
+ * + * @param uri + * @param body + */ + public void setURIAndBody(String requestURI, ReusableBuffer requestBody) { + this.requestURI = requestURI; + + BufferPool.free(this.requestBody); + this.requestBody = requestBody; + } + + /** + * Associates the request with a connection + * + * @param client + * the client's connection + */ + public void register(ConnectionState client) { + this.client = client; + client.pipeline.add(this); + } + + /** + * sets the status code to send back to the client + * + * @param statusCode + * HTTP status code + */ + public void setResponse(int statusCode) { + assert(this.responseSet == false) : "response already set"; + this.statusCode = statusCode; + this.responseBody = null; + this.responseHeaders = ReusableBuffer.wrap(HTTPUtils.getHeaders( + this.statusCode, 0, this.responseType)); + this.responseSet = true; + } + + /** + * sets the response with a body (contents) + * + * @param statusCode + * HTTP status code to deliver to client + * @param responseBody + * data to send to the client + * @param responseType + * content type + */ + public void setResponse(int statusCode, ReusableBuffer responseBody, + HTTPUtils.DATA_TYPE responseType) { + assert(this.responseSet == false) : "response already set"; + this.statusCode = statusCode; + this.responseType = responseType; + this.responseBody = responseBody; + this.responseHeaders = ReusableBuffer.wrap(HTTPUtils.getHeaders( + this.statusCode, + (responseBody != null) ? 
responseBody.capacity() : 0, + this.responseType)); + this.responseSet = true; + } + + /** + * sets the response with a string (contents) + * + * @param statusCode + * HTTP status code to deliver to client + * @param responseBody + * data to send to the client + * @param responseType + * content type + */ + public void setResponse(int statusCode, String responseText) { + assert(this.responseSet == false) : "response already set"; + this.statusCode = statusCode; + this.responseType = HTTPUtils.DATA_TYPE.JSON; + if (responseText != null) + this.responseBody = ReusableBuffer.wrap(responseText.getBytes(HTTPUtils.ENC_UTF8)); + this.responseHeaders = ReusableBuffer.wrap(HTTPUtils.getHeaders( + this.statusCode, + (responseBody != null) ? responseBody.capacity() : 0, + this.responseType)); + this.responseSet = true; + } + + /** + * sets the response with a body (contents) + * + * @param statusCode + * HTTP status code to deliver to client + * @param responseBody + * data to send to the client + * @param responseType + * content type + * @param additionalheaders + * HTTPHeaders containing additional headers + */ + public void setResponse(int statusCode, ReusableBuffer responseBody, + HTTPUtils.DATA_TYPE responseType, HTTPHeaders additionalHeaders) { + assert(this.responseSet == false) : "response already set"; + this.statusCode = statusCode; + this.responseType = responseType; + this.responseBody = responseBody; + this.responseHeaders = ReusableBuffer.wrap(HTTPUtils.getHeaders( + this.statusCode, + (responseBody != null) ? responseBody.capacity() : 0, + this.responseType, + additionalHeaders == null? null: additionalHeaders.toString())); + this.responseSet = true; + } + + /** + * Indicates if connection should be closed after delivering the response. 
+ * + * @param closeAfterSend + * if true, connection is closed + */ + public void setClose(boolean closeAfterSend) { + this.closeConnection = closeAfterSend; + } + + /** It provides the byte array of the body + * @return The array of bytes contained in the body of the request or null if there wasn't body in the message + * + * @author Jesús Malo (jmalo) + */ + public byte[] getBody() { + byte[] body = null; + + if (requestBody != null) body = requestBody.array(); + + return body; + } + + /** check if the request has a response and can be sent + */ + public boolean isReady() { + return responseSet; + } + + public void active() { + this.client.active.set(true); + } + + public InetSocketAddress getClientAddress() { + SocketAddress addr = this.client.channel.socket().getRemoteSocketAddress(); + if(addr instanceof InetSocketAddress) + return (InetSocketAddress) addr; + else + return null; + } + + public String toString() { + String origin; + try { + origin = this.client.channel.socket().getRemoteSocketAddress().toString(); + } + catch (NullPointerException ex) { + origin = "UNKNOWN"; + } + + String resp = "null"; + if (this.responseBody != null) { + if (this.responseBody.capacity() < 256) { + resp = new String(this.responseBody.array()); + } else { + resp = "too large ("+this.responseBody.capacity()+"), stream position: "+this.responseBody.position(); + } + } + + String rq = "null"; + if (this.requestBody != null) { + if (this.requestBody.capacity() < 256) { + rq = new String(this.requestBody.array()); + } else { + rq = "too large ("+this.requestBody.capacity()+"), stream position: "+this.requestBody.position(); + } + } + + String respHdrs = null; + if(responseHeaders != null) + respHdrs = new String(responseHeaders.array()); + + return "PinkyRequest from "+ origin +"\n"+ + "\tURI "+this.requestURI+"\n"+ + "\tMethod "+this.requestMethod+"\n"+ + "\t-----------\n"+ + "\tRqHdrs "+this.requestHeaders+"\n"+ + "\tRqBody "+rq+"\n"+ + "\t-----------\n"+ + "\tRespHdrs 
"+respHdrs+"\n"+ + "\tRespBody "+resp; + + } + + void freeBuffer() { + BufferPool.free(this.requestBody); + BufferPool.free(this.responseBody); + BufferPool.free(this.responseHeaders); + + this.requestBody = null; + this.responseHeaders = null; + this.responseBody = null; + } + + public ChannelIO getChannelIO() { + return client.channel; + } + + public boolean requestAuthentication(String username, String password) { + if (client.httpDigestNounce == null) { + client.httpDigestNounce = Long.toHexString(System.currentTimeMillis()+(long)(Math.random()*10000.0))+ + Long.toHexString((long) (Math.random() * Long.MAX_VALUE)); + } + if (client.userName == null) { + String authHeader = this.requestHeaders.getHeader(HTTPHeaders.HDR_AUTHORIZATION); + if (authHeader == null) { + HTTPHeaders hdrs = new HTTPHeaders(); + hdrs.addHeader(HTTPHeaders.HDR_WWWAUTH, "Digest realm=\"xtreemfs\",opaque=\"ignoreme\",algorithm=\"MD5\",nonce=\""+client.httpDigestNounce+"\""); + this.setResponse(HTTPUtils.SC_UNAUTHORIZED, null, HTTPUtils.DATA_TYPE.HTML,hdrs); + return false; + } else { + System.out.println("hdr: "+authHeader); + + if (!authHeader.startsWith("Digest")) { + HTTPHeaders hdrs = new HTTPHeaders(); + hdrs.addHeader(HTTPHeaders.HDR_WWWAUTH, "Digest realm=\"xtreemfs\",opaque=\"ignoreme\",algorithm=\"MD5\",nonce=\""+client.httpDigestNounce+"\""); + this.setResponse(HTTPUtils.SC_UNAUTHORIZED, null, HTTPUtils.DATA_TYPE.HTML,hdrs); + return false; + } + + //check header... 
+ Pattern p = Pattern.compile("username=\\\"(\\S+)\\\""); + Matcher m = p.matcher(authHeader); + m.find(); + final String cUsername = m.group(1); + + p = Pattern.compile("uri=\\\"(\\S+)\\\""); + m = p.matcher(authHeader); + m.find(); + final String cURI = m.group(1); + + + p = Pattern.compile("response=\\\"(\\S+)\\\""); + m = p.matcher(authHeader); + m.find(); + final String cResponse = m.group(1); + + p = Pattern.compile("nonce=\\\"(\\S+)\\\""); + m = p.matcher(authHeader); + m.find(); + final String cNonce = m.group(1); + + if (!cNonce.equals(client.httpDigestNounce)) { + HTTPHeaders hdrs = new HTTPHeaders(); + hdrs.addHeader(HTTPHeaders.HDR_WWWAUTH, "Digest realm=\"xtreemfs\",opaque=\"ignoreme\",algorithm=\"MD5\",stale=true,nonce=\""+client.httpDigestNounce+"\""); + this.setResponse(HTTPUtils.SC_UNAUTHORIZED, null, HTTPUtils.DATA_TYPE.HTML,hdrs); + return false; + } + + try { + MessageDigest md5 = MessageDigest.getInstance("MD5"); + md5.update((username+":xtreemfs:"+password).getBytes()); + byte[] digest = md5.digest(); + final String HA1 = OutputUtils.byteArrayToHexString(digest).toLowerCase(); + + md5.update((this.requestMethod+":"+cURI).getBytes()); + digest = md5.digest(); + final String HA2 = OutputUtils.byteArrayToHexString(digest).toLowerCase(); + + md5.update((HA1+":"+client.httpDigestNounce+":"+HA2).getBytes()); + digest = md5.digest(); + final String response = OutputUtils.byteArrayToHexString(digest).toLowerCase(); + + if (!response.equals(cResponse)) { + HTTPHeaders hdrs = new HTTPHeaders(); + hdrs.addHeader(HTTPHeaders.HDR_WWWAUTH, "Digest realm=\"xtreemfs\",opaque=\"ignoreme\",algorithm=\"MD5\",nonce=\""+client.httpDigestNounce+"\""); + this.setResponse(HTTPUtils.SC_UNAUTHORIZED, null, HTTPUtils.DATA_TYPE.HTML,hdrs); + return false; + } + + client.userName = username; + Logging.logMessage(Logging.LEVEL_DEBUG, this,"channel authenticated as user "+client.userName); + return true; + } catch (NoSuchAlgorithmException ex) { + 
this.setResponse(HTTPUtils.SC_SERVER_ERROR); + return false; + } + } + } else { + return true; + } + } + +} + diff --git a/servers/src/org/xtreemfs/foundation/pinky/PinkyRequestListener.java b/servers/src/org/xtreemfs/foundation/pinky/PinkyRequestListener.java new file mode 100644 index 0000000000000000000000000000000000000000..c7375b848b614de30aeab4b53f5713f6ebb4b646 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/pinky/PinkyRequestListener.java @@ -0,0 +1,43 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.foundation.pinky; + +/** + * Listener interface to register for client requests with Pinky. + * + * @author bjko + */ +public interface PinkyRequestListener { + + /** + * Called when a request is received. + * + * @attention This operation blocks the Pinky server thread! + * @param theRequest + * the request received from the client. 
+ */ + public void receiveRequest(PinkyRequest theRequest); + +} diff --git a/servers/src/org/xtreemfs/foundation/pinky/PipelinedPinky.java b/servers/src/org/xtreemfs/foundation/pinky/PipelinedPinky.java new file mode 100644 index 0000000000000000000000000000000000000000..220d284507594ce134550f1ccc836f497dfc3fc7 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/pinky/PipelinedPinky.java @@ -0,0 +1,714 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.foundation.pinky; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.nio.channels.CancelledKeyException; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicInteger; + +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.LifeCycleThread; +import org.xtreemfs.foundation.pinky.channels.ChannelIO; +import org.xtreemfs.foundation.pinky.channels.SSLChannelIO; + +/** + * Main (and single) thread for the Pinky async IO server. + * + * @author bjko + */ +public class PipelinedPinky extends LifeCycleThread implements PinkyInterface { + + /** + * the server socket + */ + private final ServerSocketChannel socket; + + /** + * Selector for server socket + */ + private final Selector selector; + + /** + * If set to true thei main loop will exit upon next invocation + */ + private boolean quit; + + /** + * List of all active connections. + */ + private final Queue connections; + + /** + * Number of active connections. Counting in a ConcurrentQ is too expensive. + */ + private final AtomicInteger numCon; + + /** + * Cleaner thread that removes lingering connections after timeout. + */ + private ConnectionRemover crThr; + + /** + * A listener that receives all requests. 
+ */ + private PinkyRequestListener receiver; + + /** + * maximu size of a client pipeline + */ + public static int MAX_CLIENT_QUEUE = 20000; + + /** + * if the Q was full we need at least + * CLIENT_Q_THR spaces before we start + * reading from the client again. + * This is to prevent it from oscillating + */ + public static int CLIENT_Q_THR = 5000; + + /** + * options for ssl connection + */ + private SSLOptions sslOptions; + + public static final int CONNECTION_REM_INTERVAL = 1000 * 60; + + /** + * Creates a new instance of the Pinky server without a secure channel + * + * @param bindPort + * port to bind the server socket to + * @param bindAddr + * device address to bind the server socket to (null = any) + * @param rl + * the listener to use or null if it will be specified later + * @throws java.io.IOException + * passes IO Exception when it cannot setup the server socket + */ + public PipelinedPinky(int bindPort, InetAddress bindAddr, PinkyRequestListener rl) throws IOException { + this(bindPort, bindAddr, rl, null); + } + + /** + * Creates a new instance of the Pinky server + * + * @param bindPort + * port to bind the server socket to + * @param rl + * the listener to use or null if it will be specified later + * @param sslOptions + * options for ssl connection, null for no SSL + * @throws java.io.IOException + * passes IO Exception when it cannot setup the server socket + */ + public PipelinedPinky(int bindPort, InetAddress bindAddr, PinkyRequestListener rl, SSLOptions sslOptions) throws IOException { + + super("Pinky thr." + bindPort); + this.numCon = new AtomicInteger(0); + + connections = new ConcurrentLinkedQueue(); + + // open server socket + socket = ServerSocketChannel.open(); + socket.configureBlocking(false); + socket.socket().setReceiveBufferSize(256 * 1024); + socket.socket().setReuseAddress(true); + socket.socket().bind(bindAddr == null ? 
new InetSocketAddress(bindPort) : new InetSocketAddress(bindAddr, bindPort)); + + // create a selector and register socket + selector = Selector.open(); + socket.register(selector, SelectionKey.OP_ACCEPT); + + // server is ready to accept connections now + + receiver = rl; + + this.sslOptions = sslOptions; + } + + public void start() { + + // start helper threads + crThr = new ConnectionRemover(connections, this.numCon, selector, CONNECTION_REM_INTERVAL); + crThr.start(); + + super.start(); + } + + /** + * DOES NOT REALLY WORK AT THE MOMENT! DO NOT USE! + * + * @param conn + */ + public void releaseConnection(ConnectionState conn) { + connections.remove(conn); + SelectionKey conKey = conn.channel.keyFor(selector); + conKey.cancel(); + selector.wakeup(); + } + + public int getNumConnections() { + return this.numCon.get(); + } + + public int getTotalQLength() { + int total = 0; + for (ConnectionState cs : connections) { + total += cs.pipeline.size(); + } + return total; + } + + /** + * DOES NOT REALLY WORK AT THE MOMENT! DO NOT USE! + */ + public void returnConnection(ConnectionState conn) throws IOException { + try { + conn.active.set(true); + if (!connections.contains(conn)) { + conn.channel.configureBlocking(false); + conn.channel.register(selector, SelectionKey.OP_READ, conn); + } + } catch (ClosedChannelException ex) { + throw new IOException("Cannot return connection because channel is closed!"); + } catch (IOException ex) { + throw ex; + } + connections.add(conn); + selector.wakeup(); + } + + /** + * Called to send a response. + * + * @attention rq must have a connection attached! 
+ * @param rq + * the request to be sent + * @throws java.io.IOException + * passes all exceptions from the used IO primitives + */ + public void sendResponse(PinkyRequest rq) { + assert (rq != null) : "Request must not be null"; + assert (rq.client != null) : "Request is not associated with a client connection!"; + assert (rq.client.channel != null) : "Client connection has no channel!"; + assert (rq.responseSet) : "no response set for request, cannot send!"; + + SelectionKey key = rq.client.channel.keyFor(this.selector); + + if (key == null) { + //throw new RuntimeException("SelectionKey for client is null?!?!"); + //the client disconnected while process + rq.ready = true; + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "sendResponse for disconnected client"); + + return; + } + try { + + synchronized (this) { + rq.ready = true; + key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); + } + // make sure we can send it away! + selector.wakeup(); + } catch (CancelledKeyException e) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "sendResponse for disconnected client"); + return; + } + } + + /** + * registers a listener for client requests. Overwrites any prevoiusly + * installed listener. + * + * @param rl + * the listener or null to unregister + */ + public void registerListener(PinkyRequestListener rl) { + this.receiver = rl; + } + + /** + * Shuts the server down gracefully. All connections are closed. 
+ */ + public void shutdown() { + try { + this.quit = true; + crThr.quitThread(); + crThr.join(); + selector.wakeup(); + } catch (InterruptedException exc) { + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + } + } + + public void restartReading() { + for (ConnectionState cs : connections) { + try { + SelectionKey key = cs.channel.keyFor(selector); + key.interestOps(key.interestOps() | SelectionKey.OP_READ); + } catch (CancelledKeyException ex) { + // don't care + Logging.logMessage(Logging.LEVEL_WARN, this, ex); + } + } + selector.wakeup(); + + } + + /** + * Pinky's main loop + */ + public void run() { + + try { + + // to ease debugging + Logging.logMessage(Logging.LEVEL_INFO, this, ((sslOptions != null) ? "SSL enabled " : "") + " pinky operational"); + notifyStarted(); + + // repeat until someone shuts the thread down + while (!quit) { + // try to select events... + try { + if (selector.select() == 0) { + continue; + } + } catch (CancelledKeyException ex) { + //who cares + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_WARN, this, "Exception while selecting: " + ex); + continue; + } + + // fetch events + Set keys = selector.selectedKeys(); + Iterator iter = keys.iterator(); + + // process all events + while (iter.hasNext()) { + + SelectionKey key = iter.next(); + + // remove key from the list + iter.remove(); + try { + // ACCEPT A CONNECTION + if (key.isAcceptable()) { + + SocketChannel client = null; + ConnectionState con = null; + ChannelIO channelIO = null; + // FIXME: Better exception handling! + try { + + // accept that connection + client = socket.accept(); + + if (sslOptions == null) { + channelIO = new ChannelIO(client); + } else { + channelIO = new SSLChannelIO(client, sslOptions, false); + } + con = new ConnectionState(channelIO); + + // and configure it to be non blocking + // IMPORTANT! 
+ client.configureBlocking(false); + client.register(selector, SelectionKey.OP_READ, con); + client.socket().setTcpNoDelay(true); + + numCon.incrementAndGet(); + + // this is used to hold the state and buffer for each + // connection + connections.add(con); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "connect from client at " + client.socket().getRemoteSocketAddress()); + + } catch (ClosedChannelException ex) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "cannot establish connection: " + ex); + if (channelIO != null) { + channelIO.close(); + } + continue; + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "cannot establish connection: " + ex); + if (channelIO != null) { + channelIO.close(); + } + continue; + } + } + + try { + // INPUT READY + if (key.isReadable()) { + ConnectionState con = (ConnectionState) key.attachment(); + + // make sure there is an attachment + if (con != null) { + if (!con.channel.isShutdownInProgress()) { + if (con.channel.doHandshake(key)) { + if (con.pipeline.size() > MAX_CLIENT_QUEUE) { + // client Q is full + if (!con.channel.socket().isOutputShutdown()) { + Logging.logMessage(Logging.LEVEL_ERROR, this, "Q full, stop reading " + con.channel.socket().getRemoteSocketAddress()); + + //we stop reading and thus delay the client + key.interestOps(key.interestOps() & ~SelectionKey.OP_READ); + //continue, because it is probably writable and we + //want to write it out... + } + } else { + // Q has space + int numread; // num bytes read from Socket + + try { + numread = con.channel.read(con.data.getBuffer()); + } catch (IOException ex) { + // read returns -1 when connection was closed + numread = -1; + } + + + if (numread == -1) { + // connection was closed... + Logging.logMessage(Logging.LEVEL_DEBUG, this, "client deconnected " + con.channel.socket().getRemoteSocketAddress()); + + // cancel the key, i.e. 
deregister from Selector + key.cancel(); + connections.remove(con); + + numCon.decrementAndGet(); + + try { + con.channel.close(); + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "exception while closing channel: " + ex); + // no one cares! + } + con.freeBuffers(); + // continue because key is cancelled! + continue; + } + + // so there is new data available + if (numread > 0) { + + // important to find lingering connections + con.active.set(true); + // channel.read(con.data); + + // prepare buffer for reading + con.data.flip(); + + // the parser may return multiple requests + // because this is async io + List requests = new LinkedList(); + + while (con.data.hasRemaining()) { + // as long as there is data we call the + // parser + PinkyRequest rq = con.processBuffer(); + + // null means there is no request + // complete + if (rq != null) { + //rq._receiveTime = System.nanoTime(); + requests.add(rq); + } + + } + // make buffer ready for reading again + con.data.compact(); + + if (receiver != null) { + // hand over requests for processing + for (PinkyRequest rq : requests) { + if (rq.statusCode == HTTPUtils.SC_OKAY) { + receiver.receiveRequest(rq); + } else { + this.sendResponse(rq); + } + } + } + + } + } + + } + } + } + } + + // CAN WRITE OUPUT + if (key.isWritable()) { + ConnectionState con = (ConnectionState) key.attachment(); + + con.active.set(true); + if (!con.channel.isShutdownInProgress()) { + if (con.channel.doHandshake(key)) { + boolean rqDone; + int numSent = 0; + do { + rqDone = false; + + if (con.toSend == null) { + synchronized (this) { + if (con.pipeline.size() > 0) { + if (con.pipeline.get(0).ready) { + con.toSend = con.pipeline.get(0); + con.toSend.responseHeaders.position(0); + con.remainingBytes = ((con.toSend.responseBody != null) ? 
con.toSend.responseBody.capacity() : 0) + + con.toSend.responseHeaders.capacity(); + if (con.toSend.responseBody == null) { + con.sendData = new ByteBuffer[]{con.toSend.responseHeaders.getBuffer()}; + } else { + con.toSend.responseBody.position(0); + con.sendData = new ByteBuffer[]{con.toSend.responseHeaders.getBuffer(), + con.toSend.responseBody.getBuffer() + }; + } + } else { + key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); + break; + } + } else { + key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); + break; + } + } + } + + //System.out.println("[ I | PipelinedPinky ] sent in loop : "+(numSent++)); + + + con.remainingBytes -= con.channel.write(con.sendData); + assert con.remainingBytes >= 0; + if (con.remainingBytes == 0 && con.channel.isFlushed()) { + //System.out.println("NEXT REQUEST!"); + rqDone = true; + } else { + //System.out.println("WAIT FOR NEXT BUFFER!"); + break; + } + + /*if (con.toSend.responseHeaders != null) { + con.channel.write(con.toSend.responseHeaders.getBuffer()); + if (!con.toSend.responseHeaders.hasRemaining()) { + BufferPool.free(con.toSend.responseHeaders); + con.toSend.responseHeaders = null; + // if there is no body we can skip the + // next round! + if (con.toSend.responseBody != null) { + con.toSend.responseBody.position(0); + } + } else { + // skip loop because buffer is full + break; + } + } + if (con.toSend.responseHeaders == null) { + // headers sent, send body + if (con.toSend.responseBody == null) { + rqDone = true; + } else { + if (con.toSend.responseBody.hasRemaining()) { + con.channel.write(con.toSend.responseBody.getBuffer()); + } + if (!con.toSend.responseBody.hasRemaining()) { + rqDone = true; + } else { + // skip loop because buffer is full + break; + } + } + }*/ + + if (rqDone) { + // close or fetch next request... 
+ if (con.toSend.closeConnection) { + if (con.channel.shutdown(key)) { + closeConnection(key, con); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "connection to " + con.channel.socket().getRemoteSocketAddress() + " closed"); + break; + } + } else { + //this is the requeue for "streaming" + if (con.toSend.streaming) { + PinkyRequest rq = con.toSend; + BufferPool.free(rq.responseBody); + BufferPool.free(rq.responseHeaders); + con.sendData = null; + con.toSend = null; + //notify the request listener (requeue) + receiver.receiveRequest(rq); + // do not continue with the rest... + break; + } + + /*if (con.toSend._receiveTime > 0) { + long dur = System.nanoTime()-con.toSend._receiveTime; + System.out.print("[ "+con.toSend.requestMethod+","+con.toSend.requestURI+" "+((double)dur)/1e6+"ms] "); + }*/ + + con.toSend.freeBuffer(); + con.toSend = null; + // we can do this safely because + // add/remove occur only in this thread + con.pipeline.remove(0); + + //if we're not interested in READ the client Q + //was full and we have to check, if there + //is space now. We have to do it here because + //break further down could destroy everything + if ((key.interestOps() & SelectionKey.OP_READ) == 0) { + if (con.pipeline.size() < (MAX_CLIENT_QUEUE - CLIENT_Q_THR)) { + //read from client again + key.interestOps(key.interestOps() | SelectionKey.OP_READ); + Logging.logMessage(Logging.LEVEL_ERROR, this, "Q for " + con.channel.socket().getRemoteSocketAddress() + " is ready again"); + } + } + + if (con.pipeline.size() > 0) { + if (con.pipeline.get(0).ready) { + // fetch next Q item + //con.toSend = con.pipeline.get(0); + //System.out.println("next request fetched!"); + continue; + } else { + break; + } + } else { + // Q empty, no more writables! 
+ + key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); + break; + } + + } + } + } while (rqDone); + } + } else { + if (con.channel.shutdown(key)) { + closeConnection(key, con); + } + } + continue; + } + } catch (IOException ex) { + try { + ConnectionState con = (ConnectionState) key.attachment(); + Logging.logMessage(Logging.LEVEL_INFO, this, "connection to " + con.channel.socket().getRemoteSocketAddress() + " broke:" + ex); + ex.printStackTrace(); + //throw away everything and close connection! + closeConnection(key, con); + + } catch (IOException ex2) { + Logging.logMessage(Logging.LEVEL_ERROR, this, "cannot close connection due to " + ex2); + //cannot do anything here -> ignore! + } + } + } catch (CancelledKeyException ex) { + Logging.logMessage(Logging.LEVEL_WARN, this,"key has been canceled: "+ex); + } + + } + } + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "initiating gracefull shutdown..."); + // gracefully shutdown... + try { + for (ConnectionState cs : connections) { + try { + cs.freeBuffers(); + // TODO: non-blocking shutdown would be better + while (!(cs.channel.shutdown(cs.channel.keyFor(selector)))) { + } + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, "Exception when shutdown connection: " + cs.channel.socket().getRemoteSocketAddress() + " " + ex.toString()); + } finally { + try { + closeConnection(cs.channel.keyFor(selector), cs); + } catch (IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + } + selector.close(); + socket.close(); + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + return; + } + Logging.logMessage(Logging.LEVEL_INFO, this, "shutdown complete"); + notifyStopped(); + } catch (OutOfMemoryError ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + Logging.logMessage(Logging.LEVEL_ERROR, this, BufferPool.getStatus()); + notifyCrashed(new Exception(ex)); + } catch (Throwable th) { + Logging.logMessage(Logging.LEVEL_ERROR, 
this, th); + notifyCrashed(th instanceof Exception ? (Exception) th + : new Exception(th)); + } + } + + /* Tears down a single client connection: decrements the open-connection counter, closes the channel, marks the connection inactive, drops per-connection state (parsed headers, pending response, buffers) and deregisters the selection key; the selector is woken afterwards (NOTE(review): presumably so the select loop notices the cancelled key - confirm). */ void closeConnection(SelectionKey key, ConnectionState con) throws IOException { + numCon.decrementAndGet(); + con.channel.close(); + con.active.set(false); + con.requestHeaders = null; + key.cancel(); + con.toSend = null; + con.freeBuffers(); + connections.remove(con); + selector.wakeup(); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "connection to " + con.channel.socket().getRemoteSocketAddress() + " closed"); + } + + /* Returns the SSL options this server instance was created with (NOTE(review): may be null when SSL is disabled - confirm against the constructor, which is outside this view). */ public SSLOptions getSSLOptions() { + return this.sslOptions; + } +} diff --git a/servers/src/org/xtreemfs/foundation/pinky/SSLOptions.java b/servers/src/org/xtreemfs/foundation/pinky/SSLOptions.java new file mode 100644 index 0000000000000000000000000000000000000000..3ea76adf4beace60dd25224d3ce200e8ca214591 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/pinky/SSLOptions.java @@ -0,0 +1,241 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see .
+*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.foundation.pinky; + +import java.io.FileInputStream; +import java.io.IOException; +import java.security.KeyManagementException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.UnrecoverableKeyException; +import java.security.cert.CertificateException; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; + +/** + * Encapsulates the SSLOptions for the connections of pinky and speedy + * + * @author clorenz + */ +public class SSLOptions { + /** + * a Java JKS Keystore + */ + public final static String JKS_CONTAINER = "JKS"; + /** + * a PKCS12 Keystore + */ + public final static String PKCS12_CONTAINER = "PKCS12"; + + /** + * file with the private key and the public cert for the server + */ + private final String serverCredentialFile; + /** + * file with trusted public certs + */ + private final String trustedCertificatesFile; + + /** + * passphrase of the server credential file + */ + private final char[] serverCredentialFilePassphrase; + /** + * passphrase of the trusted certificates file + */ + private final char[] trustedCertificatesFilePassphrase; + + /** + * using symmetric encryption or only authenticating via certs + */ + private boolean authenticationWithoutEncryption; + + /** + * file format of the server credential file + */ + private final String serverCredentialFileContainer; + /** + * file format of the trusted certificates file + */ + private final String trustedCertificatesFileContainer; + + /** + * knows the used certs and more + */ + private final SSLContext sslContext; + + /** + * creates a new SSLOptions object, which uses PKCS12 Container and symmetric encryption + * @param serverCredentialFile file with the private key and the public cert for the server + * @param 
serverCredentialFilePassphrase passphrase of the server credential file + * @param trustedCertificatesFile file with trusted public certs + * @param trustedCertificatesFilePassphrase passphrase of the trusted certificates file + * @throws IOException + */ + public SSLOptions(String serverCredentialFile, + String serverCredentialFilePassphrase, + String trustedCertificatesFile, + String trustedCertificatesFilePassphrase) throws IOException { + this(serverCredentialFile, serverCredentialFilePassphrase, PKCS12_CONTAINER, trustedCertificatesFile, trustedCertificatesFilePassphrase, PKCS12_CONTAINER, false); + } + + /** + * creates a new SSLOptions object, which uses PKCS12 Container + * @param serverCredentialFile file with the private key and the public cert for the server + * @param serverCredentialFilePassphrase passphrase of the server credential file + * @param trustedCertificatesFile file with trusted public certs + * @param trustedCertificatesFilePassphrase passphrase of the trusted certificates file + * @param authenticationWithoutEncryption using symmetric encryption or only authenticating via certs + * @throws IOException + */ + public SSLOptions(String serverCredentialFile, + String serverCredentialFilePassphrase, + String trustedCertificatesFile, + String trustedCertificatesFilePassphrase, + boolean authenticationWithoutEncryption) throws IOException { + this(serverCredentialFile, serverCredentialFilePassphrase, PKCS12_CONTAINER, trustedCertificatesFile, trustedCertificatesFilePassphrase, PKCS12_CONTAINER, authenticationWithoutEncryption); + } + + /** + * creates a new SSLOptions object + * @param serverCredentialFile file with the private key and the public cert for the server + * @param serverCredentialFilePassphrase passphrase of the server credential file + * @param serverCredentialFileContainer file format of the server credential file + * @param trustedCertificatesFile file with trusted public certs + * @param trustedCertificatesFilePassphrase 
passphrase of the trusted certificates file + * @param trustedCertificatesFileContainer file format of the trusted certificates file + * @param authenticationWithoutEncryption using symmetric encryption or only authenticating via certs + * @throws IOException + */ + public SSLOptions(String serverCredentialFile, + String serverCredentialFilePassphrase, + String serverCredentialFileContainer, + String trustedCertificatesFile, + String trustedCertificatesFilePassphrase, + String trustedCertificatesFileContainer, + boolean authenticationWithoutEncryption) throws IOException { + this.serverCredentialFile = serverCredentialFile; + this.trustedCertificatesFile = trustedCertificatesFile; + + if (serverCredentialFilePassphrase != null) + this.serverCredentialFilePassphrase = serverCredentialFilePassphrase.toCharArray(); + else + this.serverCredentialFilePassphrase = null; + + if (trustedCertificatesFilePassphrase != null) + this.trustedCertificatesFilePassphrase = trustedCertificatesFilePassphrase.toCharArray(); + else + this.trustedCertificatesFilePassphrase = null; + + this.serverCredentialFileContainer = serverCredentialFileContainer; + this.trustedCertificatesFileContainer = trustedCertificatesFileContainer; + + this.authenticationWithoutEncryption = authenticationWithoutEncryption; + + sslContext = createSSLContext(); + } + + /** Create/initialize the SSLContext with key material + * @return the created and initialized SSLContext + * @throws IOException + */ + private SSLContext createSSLContext() throws IOException { + SSLContext sslContext = null; + try { + // First initialize the key and trust material. 
+ KeyStore ksKeys = KeyStore.getInstance(serverCredentialFileContainer); + ksKeys.load(new FileInputStream(serverCredentialFile), serverCredentialFilePassphrase); + KeyStore ksTrust = KeyStore.getInstance(trustedCertificatesFileContainer); + ksTrust.load(new FileInputStream(trustedCertificatesFile), trustedCertificatesFilePassphrase); + + // KeyManager's decide which key material to use. + KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509"); + kmf.init(ksKeys, serverCredentialFilePassphrase); + + // TrustManager's decide whether to allow connections. + TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509"); + tmf.init(ksTrust); + + sslContext = SSLContext.getInstance("TLS"); + sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null); + } catch (UnrecoverableKeyException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } catch (KeyManagementException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } catch (KeyStoreException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } catch (NoSuchAlgorithmException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } catch (CertificateException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + return sslContext; + } + + public boolean isAuthenticationWithoutEncryption() { + return this.authenticationWithoutEncryption; + } + + public void setAuthenticationWithoutEncryption( + boolean authenticationWithoutEncryption) { + this.authenticationWithoutEncryption = authenticationWithoutEncryption; + } + + public String getServerCredentialFile() { + return this.serverCredentialFile; + } + + public String getServerCredentialFileContainer() { + return this.serverCredentialFileContainer; + } + + public String getServerCredentialFilePassphrase() { + return this.serverCredentialFilePassphrase.toString(); + } + + public String getTrustedCertificatesFile() { + return this.trustedCertificatesFile; + } + + public 
String getTrustedCertificatesFileContainer() { + return this.trustedCertificatesFileContainer; + } + + public String getTrustedCertificatesFilePassphrase() { + return this.trustedCertificatesFilePassphrase.toString(); + } + + public SSLContext getSSLContext() { + return this.sslContext; + } +} diff --git a/servers/src/org/xtreemfs/foundation/pinky/channels/ChannelIO.java b/servers/src/org/xtreemfs/foundation/pinky/channels/ChannelIO.java new file mode 100644 index 0000000000000000000000000000000000000000..e306a358c9839ffb6689a859c2c7858501604e8b --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/pinky/channels/ChannelIO.java @@ -0,0 +1,164 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. +This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based +Grid Operating System, see for more details. +The XtreemOS project has been developed with the financial support of the +European Commission's IST program under contract #FP6-033576. +XtreemFS is free software: you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation, either version 2 of the License, or (at your option) +any later version. +XtreemFS is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. +You should have received a copy of the GNU General Public License +along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.foundation.pinky.channels; + +import java.io.IOException; +import java.net.Socket; +import java.net.SocketAddress; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.SelectableChannel; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; +import java.security.cert.Certificate; + +/** + * A abstraction of the SocketChannel + * + * @author clorenz + */ +public class ChannelIO { + + protected final SocketChannel channel; + + protected Certificate[] certs; + + protected Object attachment; + + public ChannelIO(SocketChannel channel) { + super(); + this.channel = channel; + this.certs = null; + attachment = null; + } + + public SelectableChannel configureBlocking(boolean block) + throws IOException { + return channel.configureBlocking(block); + } + + public boolean connect(SocketAddress remote) throws IOException { + return this.channel.connect(remote); + } + + public void close() throws IOException { + channel.socket().close(); + channel.close(); + } + + public boolean isBlocking() { + return channel.isBlocking(); + } + + public boolean isOpen() { + return channel.isOpen(); + } + + public SelectionKey keyFor(Selector sel) { + return channel.keyFor(sel); + } + + public int read(ByteBuffer dst) throws IOException { + return channel.read(dst); + } + + public SelectionKey register(Selector sel, int ops, Object att) + throws ClosedChannelException { + return channel.register(sel, ops, att); + } + + public Socket socket() { + return channel.socket(); + } + + public String toString() { + return channel.toString(); + } + + public int validOps() { + return channel.validOps(); + } + + public int write(ByteBuffer src) throws IOException { + return channel.write(src); + } + + public long write(ByteBuffer[] src) throws IOException { + return channel.write(src); + } + + public boolean 
finishConnect() throws IOException { + return this.channel.finishConnect(); + } + + public boolean isConnectionPending() { + return this.channel.isConnectionPending(); + } + + /** + * does the handshake if needed + * @param key + * @return true, if handshake is completed + * @throws IOException + */ + public boolean doHandshake(SelectionKey key) throws IOException { + return true; + } + + /** + * prepares the channel for closing + * this can take more than 1 call + * @param key + * @return true, if channel is ready for closing + * @throws IOException + */ + public boolean shutdown(SelectionKey key) throws IOException { + return true; + } + + /** + * is channel in closing-procedure? + * @return + */ + public boolean isShutdownInProgress() { + return false; + } + + /** + * is there remaining data in channel-buffers, which must be flushed? + * @return + */ + public boolean isFlushed() { + return true; + } + + public Certificate[] getCerts() { + return certs; + } + + public Object getAttachment() { + return attachment; + } + + public void setAttachment(Object attachment) { + this.attachment = attachment; + } +} diff --git a/servers/src/org/xtreemfs/foundation/pinky/channels/SSLChannelIO.java b/servers/src/org/xtreemfs/foundation/pinky/channels/SSLChannelIO.java new file mode 100644 index 0000000000000000000000000000000000000000..504ddbf3fe55e5783c6d3b8ecb84c3e2eb038c96 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/pinky/channels/SSLChannelIO.java @@ -0,0 +1,656 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ + +package org.xtreemfs.foundation.pinky.channels; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; +import java.nio.channels.SocketChannel; +import java.util.ArrayList; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLEngineResult; +import javax.net.ssl.SSLException; +import javax.net.ssl.SSLEngineResult.HandshakeStatus; + +import javax.net.ssl.SSLPeerUnverifiedException; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.pinky.SSLOptions; + +/** + * A secure abstraction of the SocketChannel (by using SSL) + * + * @author clorenz + */ +public class SSLChannelIO extends ChannelIO { + /** + * Number of Threads in the ThreadPool which will be used for the time-consuming tasks + */ +// public static int EXECUTOR_THREADS = 4; + + /** + * used SSLEngine for this channel + */ + protected final SSLEngine sslEngine; + + /** + * contains the read data encrypted by ssl + */ + protected ReusableBuffer inNetBuffer; + + /** + * contains the written data encrypted by ssl + */ + protected ReusableBuffer outNetBuffer; + + /** + * an empty buffer for e.g. 
handshaking and shutdown; it will never contain data + */ + protected ReusableBuffer dummyBuffer; + + /** + * the last SSLEngine-status + */ + protected HandshakeStatus handshakeStatus; + protected boolean handshakeComplete; + protected int keyOpsBeforeHandshake=-1; + + /** + * true, if shutdown was called at least one time + */ + protected boolean shutdownInProgress; + + /** + * cipher suites without symmetric encryption, wich are supported by the SSLEngine in Java6 + */ + protected static String[] supportedCipherSuitesWithoutEncryption = null; + + /** + * SSL-Channel is used by client + */ + protected boolean clientMode; + + /** + * for asynchronious execution of time-consuming tasks + * only one executor for ALL SSLChannelIOs + */ +// private static ExecutorService executor = null; + + private boolean closed = false; + private boolean shutdownComplete = false; + + /** + * creates a SSLChannelIO + * @param channel channel, which should be protected by SSL + * @param sslOptions the Options for the SSL-Connection + * @param clientMode true, if you are a client; false, if you are a server + * @throws SSLException + */ + public SSLChannelIO(SocketChannel channel, SSLOptions sslOptions, boolean clientMode) throws SSLException { + super(channel); + // initialize SSLEngine for a server + sslEngine = sslOptions.getSSLContext().createSSLEngine(); + sslEngine.setUseClientMode(clientMode); + sslEngine.setNeedClientAuth(true); + + if(clientMode){ + // the first call for a client is wrap() + sslEngine.beginHandshake(); + handshakeStatus = HandshakeStatus.NEED_WRAP; + }else{ + // the first call for a server is unwrap() + sslEngine.beginHandshake(); + handshakeStatus = HandshakeStatus.NEED_UNWRAP; + } + + handshakeComplete = false; + shutdownInProgress = false; + + int netBufSize = sslEngine.getSession().getPacketBufferSize(); + inNetBuffer = BufferPool.allocate(netBufSize); + outNetBuffer = BufferPool.allocate(netBufSize); + dummyBuffer = BufferPool.allocate(netBufSize); + + 
sslEngine.setEnabledProtocols(sslEngine.getSupportedProtocols()); + if(sslOptions.isAuthenticationWithoutEncryption()){ // only authentication without protecting data? + // enable only cipher suites without encryption + + if(supportedCipherSuitesWithoutEncryption == null){ // runs only first time a SSLChannelIO without Encryption is created + // find all supported cipher suites without symmetric encryption + ArrayList cipherSuites = new ArrayList(); + for(String cipherSuite : sslEngine.getSupportedCipherSuites()){ + if(cipherSuite.contains("WITH_NULL")) + cipherSuites.add(cipherSuite); + } + supportedCipherSuitesWithoutEncryption = new String[cipherSuites.size()]; + supportedCipherSuitesWithoutEncryption = cipherSuites.toArray(supportedCipherSuitesWithoutEncryption); + } + sslEngine.setEnabledCipherSuites(supportedCipherSuitesWithoutEncryption); + }else + // enable all supported cipher suites + sslEngine.setEnabledCipherSuites(sslEngine.getSupportedCipherSuites()); + + // only initialize the first time an SSLChannelIO is created +/* if(executor==null) + executor = Executors.newFixedThreadPool(EXECUTOR_THREADS);*/ + } + + /** + * {@inheritDoc} + */ + @Override + public int read(ByteBuffer dst) throws IOException { + int returnValue = 0; + if (!shutdownInProgress) { + if (handshakeComplete) { + if (channel.read(inNetBuffer.getBuffer()) == -1) { + throw new IOException("End of stream has reached."); + } + inNetBuffer.flip(); // ready for being read + while(inNetBuffer.hasRemaining()){ + SSLEngineResult result = sslEngine.unwrap(inNetBuffer + .getBuffer(), dst); + + switch (result.getStatus()) { + case OK: { + returnValue += result.bytesProduced(); + // FIXME: if client does't close the connection after receiving close_notify => decomment it + if(sslEngine.isInboundDone()) // received close_notify + close(); + break; + } + case BUFFER_UNDERFLOW: { + // needed more data in inNetBuffer, maybe nexttime + inNetBuffer.compact(); + return returnValue; + } + case 
BUFFER_OVERFLOW: { + // needed more space in dst + throw new IOException("BufferOverflow in the SSLEngine: Destination-Buffer is too small."); + } + case CLOSED: { + throw new IOException("The SSLEngine is already closed."); + } + default: { + throw new IOException("The SSLEngine is in a curiuos state."); + } + } + } + inNetBuffer.compact(); // ready for reading from channel + } + } + return returnValue; + } + + /** + * {@inheritDoc} + * warning: maybe more bytes would be consumed from src-buffer than will be written to channel (returned value) + */ + @Override + public int write(ByteBuffer src) throws IOException { + int returnValue = 0; + if (!shutdownInProgress) { + if (handshakeComplete) { + SSLEngineResult result = sslEngine.wrap(src, outNetBuffer + .getBuffer()); + outNetBuffer.flip(); // ready for writing to channel + + switch (result.getStatus()) { + case OK: { + tryFlush(); + + break; + } + case BUFFER_OVERFLOW: { + // needed more space in outNetBuffer + // two reasons for overflow: + // 1. buffer is too small + // 2. buffer is nearly full + tryFlush(); +/* throw new IOException( + "BufferOverflow in SSLEngine. 
Buffer for SSLEngine-generated data is too small.");*/ + } + case CLOSED: { + throw new IOException("The SSLEngine is already closed."); + } + default: { + throw new IOException( + "The SSLEngine is in a curiuos state."); + } + } + returnValue = result.bytesConsumed(); + } + } + return returnValue; + } + + /** + * {@inheritDoc} + */ + @Override + public long write(ByteBuffer[] src) throws IOException { + // TODO: completely rewrite this function, because it is only a simple dummy + long tmp = 0; + for(int i=0; i simple close + shutdownInProgress = true; + return true; + } + + if(shutdownComplete){ + return shutdownComplete; + } + + if (!shutdownInProgress) { // initiate shutdown + Logging.logMessage(Logging.LEVEL_DEBUG, this, "shutdown SSL connection of "+channel.socket().getInetAddress()+":"+channel.socket().getPort()); + sslEngine.closeOutbound(); + shutdownInProgress = true; + key.interestOps(key.interestOps() & ~SelectionKey.OP_READ); // don't wait for the close_notify-reply + } + + outNetBuffer.flip(); // ready for writing to channel + if(tryFlush() && sslEngine.isOutboundDone()){ // shutdown complete + key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); + shutdownComplete = true; + } + + if (!sslEngine.isOutboundDone()) { + // Get close message + SSLEngineResult result = sslEngine.wrap(dummyBuffer.getBuffer(), + outNetBuffer.getBuffer()); + outNetBuffer.flip(); // ready for writing to channel + switch (result.getStatus()) { + case OK: { + throw new IOException("This should not happen."); + } + case BUFFER_OVERFLOW: { + // needed more space in outNetBuffer + // two reasons for overflow: + // 1. buffer is too small + // 2. buffer is nearly full + tryFlush(); + key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); +/* throw new IOException( + "BufferOverflow in SSLEngine. 
Buffer for SSLEngine-generated data is too small.");*/ + break; + } + case CLOSED: { + if (tryFlush() && sslEngine.isOutboundDone()) { + key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); + shutdownComplete = true; + } else { + key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); + } + break; + } + default: { + throw new IOException("The SSLEngine is in a curiuos state."); + } + } + } + return shutdownComplete; + } + + /** + * {@inheritDoc} + */ + @Override + public void close() throws IOException { + try { + super.close(); + try { + sslEngine.closeInbound(); + sslEngine.closeOutbound(); + } catch (SSLException e) { + // ignore it + } + // free buffers + BufferPool.free(inNetBuffer); + inNetBuffer = null; + BufferPool.free(outNetBuffer); + BufferPool.free(dummyBuffer); + shutdownInProgress = true; + closed = true; + } catch (Throwable th) { + System.out.println("CANNOT CLOSE DUE TO: " + th); + throw new IOException(th); + } + } + + /** + * {@inheritDoc} + */ + @Override + protected void finalize() { + if (inNetBuffer != null) { + Logging.logMessage(Logging.LEVEL_ERROR, this, "buffers not freed!"); + BufferPool.free(inNetBuffer); + inNetBuffer = null; + BufferPool.free(outNetBuffer); + BufferPool.free(dummyBuffer); + } + if (!closed) { + System.out.println("CONNECTION WAS NOT CLOSED PROPERLY: " + this); + } + } + + /** + * Writes the outNetBuffer-data to the channel. 
After write, the buffer is + * empty or ready for add new data + * + * @return true, if write was successful; false, if buffer is not empty + * @throws IOException + */ + protected boolean tryFlush() throws IOException { +// if (outNetBuffer.hasRemaining()) { // flush the buffer + channel.write(outNetBuffer.getBuffer()); + if (outNetBuffer.hasRemaining()) { + outNetBuffer.compact(); // ready for add new data + return false; + } else + outNetBuffer.compact(); +// } + return true; + } + + /** + * {@inheritDoc} + * warning: the function manipulates the SelectionKey Ops, so don't do anything in your programm beetween first call of this function + * until the function returns true + */ + @Override + public boolean doHandshake(SelectionKey key) throws IOException { + if (handshakeComplete || shutdownInProgress) { // quick return + return handshakeComplete; + } + + if(keyOpsBeforeHandshake==-1){ + keyOpsBeforeHandshake = key.interestOps(); + key.interestOps(key.interestOps() & ~SelectionKey.OP_READ & ~SelectionKey.OP_WRITE); + } + + if(!handshakeComplete) { +// Logging.logMessage(Logging.LEVEL_DEBUG, this, "SSL-handshake next step: "+handshakeStatus); + + SSLEngineResult result; + switch (handshakeStatus) { + case NEED_UNWRAP: { + key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); + if (channel.read(inNetBuffer.getBuffer()) == -1) { + throw new IOException("End of stream has reached."); + } + + boolean underflow = false; + do{ // read all read data in buffer +// Logging.logMessage(Logging.LEVEL_DEBUG, this, "SSL-handshake doing: unwrap"); + inNetBuffer.flip(); // ready for being read + result = sslEngine.unwrap(inNetBuffer.getBuffer(), dummyBuffer.getBuffer()); + inNetBuffer.compact(); // ready for reading from channel + + handshakeStatus = result.getHandshakeStatus(); + switch (result.getStatus()) { + case OK: { + analyseHandshakeStatus(key, handshakeStatus); + break; + } + case BUFFER_UNDERFLOW: { + // needed more data in inNetBuffer, maybe nexttime + 
underflow = true; + key.interestOps(key.interestOps() | SelectionKey.OP_READ); + break; + } + case CLOSED: { + throw new IOException("The SSLEngine is already closed."); + } + default: { + throw new IOException("The SSLEngine is in a curiuos state."); + } + } + }while(bufferRemaining(inNetBuffer)!=0 && handshakeStatus==HandshakeStatus.NEED_UNWRAP && !underflow); + break; + } + case NEED_WRAP: { + key.interestOps(key.interestOps() & ~SelectionKey.OP_READ); +// Logging.logMessage(Logging.LEVEL_DEBUG, this, "SSL-handshake doing: wrap"); + + result = sslEngine.wrap(dummyBuffer.getBuffer(), outNetBuffer.getBuffer()); + outNetBuffer.flip(); // ready for writing to channel + + handshakeStatus = result.getHandshakeStatus(); + switch (result.getStatus()) { + case OK: { + tryFlush(); + + analyseHandshakeStatus(key, handshakeStatus); + break; + } + case BUFFER_OVERFLOW: { + // needed more space in outNetBuffer + // two reasons for overflow: + // 1. buffer is too small + // 2. buffer is nearly full + tryFlush(); + key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); +/* throw new IOException( + "BufferOverflow in SSLEngine. Buffer for SSLEngine-generated data is too small.");*/ + break; + } + case CLOSED: { + throw new IOException("The SSLEngine is already closed."); + } + default: { + throw new IOException("The SSLEngine is in a curiuos state."); + } + } + break; + } + case FINISHED: { + outNetBuffer.flip(); // ready for writing to channel + if(tryFlush()) + handshakeFinished(key); + else + key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); + break; + } + case NEED_TASK: { + key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); + doTasks(key); + break; + } + case NOT_HANDSHAKING: { + // TODO: Exception or maybe handshakeComplete = true? 
+ throw new IOException("The SSLEngine is not handshaking."); + } + default: { + throw new IOException("The SSLEngine is in a curiuos handshake-state."); + } + } + } + return handshakeComplete; + } + + /** + * {@inheritDoc} + */ + @Override + public boolean isFlushed() { + return bufferRemaining(outNetBuffer)==0; + } + + /** + * finishing operations for handshake + * @param key + */ + private void handshakeFinished(SelectionKey key) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "SSL-handshake for "+channel.socket().getInetAddress()+":"+channel.socket().getPort()+" finished"); + // all handshake-data processed and sent + handshakeComplete = true; + inNetBuffer.clear(); + outNetBuffer.clear(); + key.interestOps(keyOpsBeforeHandshake); + try { + this.certs = sslEngine.getSession().getPeerCertificates(); + } catch (SSLPeerUnverifiedException ex) { + Logging.logMessage(Logging.LEVEL_ERROR,this,ex); + this.certs = null; + } + + } + + /** + * + * @param key + * @param handshakeStatus + * @throws IOException + */ + private void analyseHandshakeStatus(SelectionKey key, HandshakeStatus handshakeStatus) throws IOException { + switch(handshakeStatus){ + case NEED_UNWRAP: { + key.interestOps(key.interestOps() | SelectionKey.OP_READ & ~SelectionKey.OP_WRITE); + break; + } + case NEED_WRAP: { + key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); + break; + } + case NEED_TASK: { + key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); + doTasks(key); + break; + } + case FINISHED: { + outNetBuffer.flip(); // ready for writing to channel + if(tryFlush()) + handshakeFinished(key); + else + key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); + break; + } + case NOT_HANDSHAKING: { + // TODO: Exception or maybe handshakeComplete = true? 
+ throw new IOException("The SSLEngine is not handshaking."); + } + default: { + throw new IOException("The SSLEngine is in a curiuos handshake-state."); + } + } + } + + /** + * checks the remaining data of the buffer + * (only for internal buffers) + * @param buffer + * @return + */ + private int bufferRemaining(ReusableBuffer buffer) { + buffer.flip(); // ready for being read + int tmp = buffer.remaining(); + buffer.compact(); // ready for being read + return tmp; + } + + /** + * runs the time-consuming tasks + * @param key + * @throws IOException + */ + protected void doTasks(final SelectionKey key) { +// Logging.logMessage(Logging.LEVEL_DEBUG, this, "SSL-handshake doing: doing task"); + + final int tmp = key.interestOps(); + // clear all interests, so no one other than this thread can modify the selector + key.interestOps(0); + +/* executor.execute(new Runnable(){ + public void run() {*/ + // TODO: running in a different thread + Runnable run; + while ((run = sslEngine.getDelegatedTask()) != null) { + run.run(); + } + + switch (handshakeStatus = sslEngine.getHandshakeStatus()) { + case NEED_WRAP: { + key.interestOps(tmp | SelectionKey.OP_WRITE); + break; + } + case NEED_UNWRAP: { + // need to read from channel + key.interestOps(tmp | SelectionKey.OP_READ); + break; + } + case FINISHED: { + // should not happen + handshakeFinished(key); + break; + } + case NEED_TASK: { + // should not happen + doTasks(key); + break; + } + case NOT_HANDSHAKING: { + // should not happen + Logging.logMessage(Logging.LEVEL_ERROR, this, "Exception in worker-thread: The SSLEngine is not handshaking."); + break; + } + default: { + Logging.logMessage(Logging.LEVEL_ERROR, this, "Exception in worker-thread: The SSLEngine is in a curiuos handshake-state."); + assert(false); +// throw new IOException("The SSLEngine is in a curiuos handshake-state."); + } + } + +/* key.selector().wakeup(); + } + });*/ + } +} diff --git a/servers/src/org/xtreemfs/foundation/speedy/ConnectionState.java 
b/servers/src/org/xtreemfs/foundation/speedy/ConnectionState.java new file mode 100644 index 0000000000000000000000000000000000000000..f760a2d2f389930bc72837ae5611e2b2105510a6 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/speedy/ConnectionState.java @@ -0,0 +1,487 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.foundation.speedy; + +import java.io.BufferedWriter; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.channels.ChannelIO; + +/** + * Contains a buffer and an active marker for each connection. Includes also the + * parsing routine and state machine state. + * + * @author bjko + */ +public class ConnectionState { + + /** + * Maximum buffer size + */ + public static final int BUFFSIZE = 1024 * 256; + + /** + * Maximum header size + */ + public static final int MAX_HDR_SIZE = 1024; + + /** + * Initial Size of buffer for headers + */ + public static final int INIT_HDR_BUF = 128; + + /** + * Buffer holding the incomming data + */ + ReusableBuffer data; + + /** + * The channel associated w/ this connection + */ + ChannelIO channel; + + /** + * will be set to false by a periodical clean up task. Inactive connections + * (timed out) will be closed and removed. + */ + AtomicBoolean active; + + /** + * Buffer for receiving the headers. + */ + StringBuilder requestHeaders; + + /** + * The payload received from the client. + */ + ReusableBuffer requestBody; + + /** + * The URI the client requested + */ + int responseStatusCode; + + /** + * The content-bytrange the client requested or null + */ + String byteRange; + + /** + * The request that the client waits for. 
+ */ + public SpeedyRequest waitFor; + + /** + * Status of the parser state machine if connection is idle + */ + public static final int STATUS_IDLE = 0; + + /** + * Status of the parser state machine if reading the headers + */ + public static final int STATUS_READ_HEADERS = 1; + + /** + * Status of the parser state machine after receiving a CRLF + */ + public static final int STATUS_CRLF = 5; + + /** + * Status of the parser state machine if request was parsed + */ + public static final int STATUS_PARSE_REQUEST = 2; + + /** + * Status of the parser state machine while reading the body + */ + public static final int STATUS_READ_BODY = 3; + + /** + * Current status of the request parser state machine + */ + int status; + + /** + * Request method, can be GET or PUT + */ + public String method; + + /** + * pipeline with requests + */ + public LinkedBlockingQueue sendQ; + + /** + * pipeline with requests + */ + public LinkedList receiveQ; + + /** + * this is for the multiSpeedy + */ + public InetSocketAddress endpoint; + + /** + * number of connect retries + */ + public int conRetries; + + /** + * can be used to set a timeout after which to + * reset conRetries + */ + public long nextReconnectTime; + + /** + * number of reconnect cycles that failed. + * used to increase the wait timeout + */ + public int numReconnectCycles; + + /** + * Timestamp of last request sent through this connection. + */ + public long lastUsed; + + public static final int RETRY_RESET_IN_MS = 500; + + /** max wait is one hour + */ + public static final int MAX_RETRY_WAIT = 1000*60*60; + + /** Maximum size of body to accept. + */ + public static final int MAX_BODY_SIZE = 1024*1024*64; + + /** + * Creates a new instance of ConnectionStatus + * + * @param channel + * the channel to which this state object belongs. 
+ */ + public ConnectionState(ChannelIO channel) { + + active = new AtomicBoolean(true); + + this.channel = channel; + + //data = ByteBuffer.allocateDirect(BUFFSIZE); + data = BufferPool.allocate(BUFFSIZE); + + this.status = STATUS_IDLE; + + waitFor = null; + + sendQ = new LinkedBlockingQueue(MultiSpeedy.MAX_CLIENT_QUEUE); + + receiveQ = new LinkedList(); + + this.conRetries = 0; + + this.numReconnectCycles = 0; + + this.lastUsed = TimeSync.getLocalSystemTime(); + } + + public void successfulConnect() { + this.numReconnectCycles = 0; + this.nextReconnectTime = 0; + } + + public void connectFailed() { + this.numReconnectCycles++; + long waitt = Math.round(RETRY_RESET_IN_MS*Math.pow(2,this.numReconnectCycles)); + if (waitt > MAX_RETRY_WAIT) + waitt = MAX_RETRY_WAIT; + Logging.logMessage(Logging.LEVEL_DEBUG,this,"next reconnect possible after "+(waitt/1000)+" s, "+this.numReconnectCycles); + this.nextReconnectTime = System.currentTimeMillis()+waitt; + this.lastUsed = TimeSync.getLocalSystemTime(); + } + + public boolean canReconnect() { + return (this.nextReconnectTime < System.currentTimeMillis()); + } + + public boolean serverIsAvailable() { + if (this.conRetries < MultiSpeedy.MAX_RECONNECT) { + return true; + } else { + return canReconnect(); + } + } + + /** + * This is the main parsing method. It parses the available data in the + * buffer. 
+ * + * @throws com.xtreemfs.speedyg.SpeedyException + * if an error occurs while parsing the response + */ + public void processBuffer() throws SpeedyException { + + // loop until data is empty + while (data.hasRemaining()) { + + switch (this.status) { + case STATUS_IDLE : { + // prepare request + this.requestHeaders = new StringBuilder(INIT_HDR_BUF); + this.requestBody = null; + this.status = STATUS_READ_HEADERS; + this.responseStatusCode = 0; + // TRANSITION + this.waitFor = null; + // find next waiting request + Iterator iter = this.receiveQ.iterator(); + while (iter.hasNext()) { + SpeedyRequest sr = iter.next(); + if (sr.status == SpeedyRequest.RequestStatus.WAITING) { + this.waitFor = sr; + break; + } + } + if (waitFor == null) { + Logging.logMessage(Logging.LEVEL_ERROR,this,"WWWWWWWWWWWWWWWWWWWWW waitFor == null! Clearing buffer..."); + this.status = STATUS_IDLE; + data.limit(0); + return; + } + } + + case STATUS_READ_HEADERS : { + char ch = (char) (data.get() & 0xFF); + if (ch == '\n') { + // TRANSITION + this.status = STATUS_CRLF; + this.requestHeaders.append(ch); + + } else if (ch != '\r') { + // ignore \r s + this.requestHeaders.append(ch); + } + + // check for overflows... + if (this.requestHeaders.length() >= MAX_HDR_SIZE) { + // invalidate all requests! 
+ for (SpeedyRequest rq : this.sendQ) { + rq.status = SpeedyRequest.RequestStatus.FAILED; + } + throw new SpeedyException( + "Response header exceeds max header size (" + + MAX_HDR_SIZE + " bytes): "+this.requestHeaders, true); + } + break; + } + + case STATUS_CRLF : { + char ch = (char) (data.get() & 0xFF); + if (ch == '\r') { + // IGNORE \r + continue; + } + if (ch != '\n') { + // TRANSITION + this.requestHeaders.append(ch); + this.status = STATUS_READ_HEADERS; + continue; + } + // if a second \n comes, headers are done + // TRANSITION + this.status = STATUS_PARSE_REQUEST; + } + + case STATUS_PARSE_REQUEST: { + + // if there is a content length field, try to read the body + int nextNL = this.requestHeaders.indexOf("\n"); + int cPos = 0; + String ftLine = null; + int length = 0; + + while (nextNL != -1) { + + String line = this.requestHeaders.substring(cPos, nextNL); + cPos = nextNL + 1; + nextNL = this.requestHeaders.indexOf("\n", cPos); + + if (ftLine == null) + ftLine = line; + + //if (HTTPUtils.isContentLength(line)) { + if (HTTPUtils.compareHeaderName("CONTENT-LENGTH",line)) { + try { + String len = line.substring(15).trim(); + length = Integer.valueOf(len); + } catch (Exception ex) { + // no transition because con is closed anyway... + // invalidate all requests! + for (SpeedyRequest rq : this.sendQ) { + rq.status = SpeedyRequest.RequestStatus.FAILED; + } + throw new SpeedyException( + "Conten-Length header is not an integer value!", + true); + } + if (length > MAX_BODY_SIZE) { + // make sure its not too long... + // no transition because con is closed anyway... + // invalidate all requests! 
+ for (SpeedyRequest rq : this.sendQ) { + rq.status = SpeedyRequest.RequestStatus.FAILED; + } + throw new SpeedyException( + "Body length exceeds max body size (" + + BUFFSIZE + " bytes)", true); + } + /*} else if (HTTPUtils.compareHeaderName("CONNECTION",line)) { + System.out.println(line); + // client wants connection to be closed after request + if (line.substring(11).trim().equalsIgnoreCase("close")) { + closeConn = true; + }*/ + //} else if (HTTPUtils.isContentRange(line)) { + /*} else if (HTTPUtils.compareHeaderName("CONTENT-RANGE",line)) { + this.byteRange = line.substring(14).trim(); + }*/ + } + + } + + if (ftLine == null) { + // this is an empty request...ignore it + // TRANSITION + this.status = STATUS_IDLE; + this.requestHeaders = new StringBuilder(); + continue; + } + + if ( (ftLine.length() > 4) + && (ftLine.charAt(0) == 'H') + && (ftLine.charAt(1) == 'T') + && (ftLine.charAt(2) == 'T') + && (ftLine.charAt(3) == 'P') ) { + //if (ftLine.startsWith("HTTP")) { + // extract status code + try { + // HTTP/1.1 SCD Text + String tmp = ftLine.substring(9, 12); + Integer tmp2 = new Integer(tmp); + this.responseStatusCode = tmp2; + } catch (Exception ex) { + // no transition because con is closed anyway... + // invalidate all requests! + for (SpeedyRequest rq : this.sendQ) { + rq.status = SpeedyRequest.RequestStatus.FAILED; + } + throw new SpeedyException( + "Invalid response line. Status code is not an integer or malformed." + + ftLine, true); + } + } else { + // flush the entire buffer! + // no transition because con is closed anyway... + for (SpeedyRequest rq : this.sendQ) { + rq.status = SpeedyRequest.RequestStatus.FAILED; + } + throw new SpeedyException( + "Invalid response line. Status code is not an integer or malformed." + + ftLine, true); + } + if (length > 0) { + // TRANISTION + this.status = STATUS_READ_BODY; + this.requestBody = BufferPool.allocate(length); + assert (this.requestBody.remaining() == length) : + "invalid buffer: remaining > length! 
capacity=" + this.requestBody.capacity(); + } else { + // TRANSITION + this.status = STATUS_IDLE; + this.waitFor.responseHeaders = new HTTPHeaders(); + this.waitFor.responseHeaders.parse(this.requestHeaders.toString()); + this.requestHeaders = null; + + this.waitFor.responseBody = null; + this.waitFor.statusCode = this.responseStatusCode; + this.waitFor.status = SpeedyRequest.RequestStatus.FINISHED; + } + break; + } + + case STATUS_READ_BODY: { + // we assume the body to be raw data + if (data.remaining() <= this.requestBody.remaining()) { + this.requestBody.put(data); + } else { + int oldLimit = data.limit(); + data.limit(data.position()+this.requestBody.remaining()); + assert(oldLimit > data.limit()); + this.requestBody.put(data); + data.limit(oldLimit); + } + /*while (data.hasRemaining() && this.requestBody.hasRemaining()) { + this.requestBody.put(data.get()); + }*/ + if (!this.requestBody.hasRemaining()) { + // TRANSITION + this.status = STATUS_IDLE; + if (this.waitFor == null) { + Logging.logMessage(Logging.LEVEL_ERROR,this,"this.waitFor is null!"); + System.exit(1); + } + this.waitFor.responseHeaders = new HTTPHeaders(); + this.waitFor.responseHeaders.parse(this.requestHeaders.toString()); + this.requestHeaders = null; + + this.waitFor.responseBody = this.requestBody; + this.requestBody = null; + + this.waitFor.statusCode = this.responseStatusCode; + this.waitFor.status = SpeedyRequest.RequestStatus.FINISHED; + } + } + } + } + } + + void freeBuffers() { + BufferPool.free(this.data); + for (SpeedyRequest rq : this.sendQ) { + rq.freeBuffer(); + } + } + +} diff --git a/servers/src/org/xtreemfs/foundation/speedy/EmptyResponseListener.java b/servers/src/org/xtreemfs/foundation/speedy/EmptyResponseListener.java new file mode 100644 index 0000000000000000000000000000000000000000..86a618386f71b30c309da55e3d91ac2c2919c058 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/speedy/EmptyResponseListener.java @@ -0,0 +1,33 @@ +/* Copyright (c) 2008 
Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.foundation.speedy; + +public class EmptyResponseListener implements SpeedyResponseListener { + + public void receiveRequest(SpeedyRequest theRequest) { + // do nothing + } + +} diff --git a/servers/src/org/xtreemfs/foundation/speedy/MultiSpeedy.java b/servers/src/org/xtreemfs/foundation/speedy/MultiSpeedy.java new file mode 100644 index 0000000000000000000000000000000000000000..47b9f63c043a51f63883fee2da8319d547862f46 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/speedy/MultiSpeedy.java @@ -0,0 +1,856 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even tnhe implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.foundation.speedy; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.SocketException; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.LinkedBlockingQueue; + +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.LifeCycleThread; +import org.xtreemfs.foundation.pinky.SSLOptions; +import org.xtreemfs.foundation.pinky.channels.ChannelIO; +import org.xtreemfs.foundation.pinky.channels.SSLChannelIO; + +/** + * Main (and single) thread for Speedy a client for the Pinky async IO server. + * This variant is based on PipelinedSpeedy and can handle connections to + * multiple servers. + * + * @author bjko + */ +public class MultiSpeedy extends LifeCycleThread { + + /** + * Selector for server socket + */ + Selector selector; + + /** + * If set to true thei main loop will exit upon next invocation + */ + boolean quit; + + /** + * List of all active connections. 
+ */ + private Map connections; + + /** + * Maximum size of the request queue. + */ + public static final int MAX_CLIENT_QUEUE = 50000; + + /** + * registered listeners for responses + */ + private final Map listeners; + + /** + * connections that still need to be registered with the selector. + */ + private final LinkedBlockingQueue newCons; + + /** + * Maxmimum tries to reconnect to the server + */ + public static final int MAX_RECONNECT = 4; + + + /** + * milliseconds between two timeout checks + */ + public static final int TIMEOUT_GRANULARITY = 250; + + /** + * timestamp of last timeout check + */ + private long lastCheck; + + /** + * a single listener for all connections if null listeners is used. + */ + private SpeedyResponseListener singleListener; + + /** + * options for ssl connection + */ + private SSLOptions sslOptions; + + /** + * delete idle connections after five minutes. + */ + public static long CONNECTION_REMOVE_TIMEOUT = 1000*60*5; + + //private ParserThread pThread; + + /** + * Creates a new instance of the Pinky server without SSL + * + * @throws java.io.IOException + * passes IO Exception when it cannot setup the server socket + */ + public MultiSpeedy() throws IOException { + this(null); + } + + /** + * Creates a new instance of the Pinky server + * + * @param sslOptions + * options for ssl connection, null for no SSL + * @throws java.io.IOException + * passes IO Exception when it cannot setup the server socket + */ + public MultiSpeedy(SSLOptions sslOptions) throws IOException { + + super("Speedy thr"); + + connections = Collections.synchronizedMap(new HashMap()); + + Logging.logMessage(Logging.LEVEL_INFO,this,"speedy operational"); + + this.newCons = new LinkedBlockingQueue(); + + // create a selector + selector = Selector.open(); + + listeners = new HashMap(); + + singleListener = null; + + this.sslOptions = sslOptions; + + //pThread = new ParserThread(this); + //pThread.start(); + } + + /** + * registers a listener for client requests. 
Overwrites any prevoiusly + * installed listener. + * + * @param rl + * the listener or null to unregister + */ + public void registerListener(SpeedyResponseListener rl, InetSocketAddress server) { + if (rl != null) + this.listeners.put(server, rl); + else + this.listeners.remove(server); + } + + /** + * registers a listener for all client requests. SpeedyRequest has methods + * to get server IP. Overwrites any prevoiusly installed listener. + * + * @param rl + * the listener or null to unregister + */ + public void registerSingleListener(SpeedyResponseListener rl) { + this.singleListener = rl; + } + + /** + * This method checks only if the server is available according + * to speedy's settings (i.e. timeout after connection failure). + * This does not mean that the server can be contacted, it just + * means that speedy will ttry to connect. + */ + public boolean serverIsAvailable(InetSocketAddress server) { + ConnectionState con = connections.get(server); + if (con != null) { + return con.serverIsAvailable(); + } else { + //new servers are always connectable + return true; + } + } + + /** + * Releases the resources associated with a server. Does not close the + * connection itself. + */ + public void releaseConnection(InetSocketAddress server) { + ConnectionState con = connections.get(server); + if (con != null) { + if (con.channel == null) { + connections.remove(server).freeBuffers(); + listeners.remove(server); + } + } + } + + /** + * Called to send a response. + * + * @attention rq must have a connection attached! 
+ * @param rq + * the request to be sent + * @throws java.lang.IllegalStateException + * if the send queue is full + * @throws java.io.IOException + * passes all exceptions from the used IO primitives + */ + public void sendRequest(SpeedyRequest rq, InetSocketAddress server) + throws IOException, IllegalStateException { + + if (rq.listener == null) { + if (singleListener != null) { + rq.listener = singleListener; + } else { + rq.listener = this.listeners.get(server); + if (rq.listener == null) + throw new RuntimeException("not listener set for "+server); + } + + } + + + ConnectionState con = null; + synchronized (connections) { + con = connections.get(server); + if (con == null) { + // create a new connection + Logging.logMessage(Logging.LEVEL_DEBUG,this,"received new request, open new connection to " + + server); + ChannelIO channel; + try { + if(sslOptions == null){ // no SSL + channel = new ChannelIO(SocketChannel.open()); + } else { + channel = new SSLChannelIO(SocketChannel.open(), sslOptions, true); + } + } catch (IOException ex) { + System.out.println("\n\nSPEEDY STATUS:"); + System.out.println(this.getStatus()); + System.out.println(); + throw ex; + } + channel.configureBlocking(false); + channel.socket().setTcpNoDelay(true); + channel.socket().setReceiveBufferSize(256*1024); + channel.connect(server); + + con = new ConnectionState(channel); + con.endpoint = server; + newCons.add(con); + connections.put(server, con); + + rq.registerConnection(con); + rq.status = SpeedyRequest.RequestStatus.PENDING; + con.sendQ.add(rq); + + Logging.logMessage(Logging.LEVEL_DEBUG,this,"connecting..."); + selector.wakeup(); + } else { + // recycle old connection + + if (con.conRetries >= this.MAX_RECONNECT) { + if (con.canReconnect()) { + con.conRetries = 0; + con.channel = null; + Logging.logMessage(Logging.LEVEL_DEBUG,this,"retry count reset " + + con.endpoint); + } else { + throw new IOException("Cannot contact server"); + } + } + if (con.channel == null) { + 
Logging.logMessage(Logging.LEVEL_DEBUG,this,"need a reconnect to " + + server); + reconnect(con); + } + con.lastUsed = TimeSync.getLocalSystemTime(); + rq.registerConnection(con); + rq.status = SpeedyRequest.RequestStatus.PENDING; + con.sendQ.add(rq); + + SelectionKey key = con.channel.keyFor(selector); + if (key != null) { + if (key.isValid()) { + synchronized (key) { + key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); + } + } else + Logging.logMessage(Logging.LEVEL_WARN,this,"invalid key for " + + server); + } else { + Logging.logMessage(Logging.LEVEL_WARN,this,"no key for " + server); + } + + Logging.logMessage(Logging.LEVEL_DEBUG,this,"received new request, use existing connection to " + + server); + selector.wakeup(); + } + } + + + } + + public void resetRetryCount(InetSocketAddress server) { + ConnectionState con = connections.get(server); + if (con != null) { + con.conRetries = 0; + } + } + + private void checkForTimers() { + //poor man's timer + long now = System.currentTimeMillis(); //TimeSync.getLocalSystemTime(); + if (now >= lastCheck + TIMEOUT_GRANULARITY) { + //check for timed out requests + synchronized (connections) { + Iterator conIter = connections.values().iterator(); + while (conIter.hasNext()) { + ConnectionState con = conIter.next(); + + if (con.lastUsed < (TimeSync.getLocalSystemTime()-CONNECTION_REMOVE_TIMEOUT)) { + Logging.logMessage(Logging.LEVEL_DEBUG, this,"removing idle connection from speedy: "+con.endpoint); + try { + conIter.remove(); + cancelRequests(con); + con.channel.close(); + } catch(Exception ex) { + } finally { + con.freeBuffers(); + } + } + + for (int i = 1; i < 3; i++) { + Iterator iter = (i == 1) ? 
con.receiveQ.iterator() : con.sendQ.iterator(); + while (iter.hasNext()) { + SpeedyRequest rq = iter.next(); + if ((rq.status != SpeedyRequest.RequestStatus.FAILED) && (rq.status != SpeedyRequest.RequestStatus.FINISHED) + && (rq.timeout > 0)) { + rq.waited += TIMEOUT_GRANULARITY; + if (rq.waited > rq.timeout) { + try { + Logging.logMessage(Logging.LEVEL_ERROR,this,"request timed out after "+rq.waited+"ms (to was "+rq.timeout+"). KeySet is "+con.channel.keyFor(selector).interestOps()+ + "receive wait queue length is "+con.receiveQ.size()); + } catch (Exception e) { + } + rq.status = SpeedyRequest.RequestStatus.FAILED; + assert (!rq.listenerNotified); + rq.listener.receiveRequest(rq); + rq.listenerNotified = true; + rq.freeBuffer(); + iter.remove(); + //if the connection is still open, close it! + try { + rq.con.channel.close(); + con.freeBuffers(); + conIter.remove(); + cancelRequests(rq.con); + } catch (Exception ex2) { + Logging.logMessage(Logging.LEVEL_DEBUG,this,ex2); + } + } + } + } + } + } + } + + lastCheck = now; + } + } + + /** + * reconnect after an unsuccessfull connect or after connection was closed + * + * @param con + * The connection to reconnect. + */ + private void reconnect(ConnectionState con) { + try { + // cancel old key + if (con.channel != null) { + SelectionKey key = con.channel.keyFor(selector); + key.cancel(); + } + + Logging.logMessage(Logging.LEVEL_DEBUG,this,"reconnect, open new connection to " + + con.endpoint); + + ChannelIO channel; + if(sslOptions == null){ // no SSL + channel = new ChannelIO(SocketChannel.open()); + } else { + channel = new SSLChannelIO(SocketChannel.open(), sslOptions, true); + } + channel.configureBlocking(false); + channel.socket().setTcpNoDelay(true); + channel.socket().setReceiveBufferSize(256*1024); + //try to resolve the address again! 
+ con.endpoint = new InetSocketAddress(con.endpoint.getHostName(), con.endpoint.getPort()); + channel.connect(con.endpoint); + this.newCons.add(con); + con.channel = channel; + selector.wakeup(); + } catch (SocketException ex) { + Logging.logMessage(Logging.LEVEL_ERROR,this,ex); + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR,this,ex); + } + } + + /** + * Shuts the server down gracefully. All connections are closed. + */ + public void shutdown() { + this.quit = true; + selector.wakeup(); + } + + public void closeConnection(ConnectionState con) { + try { + con.channel.close(); + synchronized (connections) { + connections.remove(con.endpoint).freeBuffers(); + } + con.channel.keyFor(selector).cancel(); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_DEBUG,this,ex); + } + cancelRequests(con); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "connection to " + con.channel.socket().getRemoteSocketAddress() + " closed"); + } + + /** + * Pinky's main loop + */ + public void run() { + + try { + + notifyStarted(); + lastCheck = System.currentTimeMillis(); + + // repeat until someone shuts the thread down + while (!quit) { + // try to select events... 
+ int numKeys = 0; + try { + numKeys = selector.select(TIMEOUT_GRANULARITY); + + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + continue; + } + + // register new connections w/ selector + if (!this.newCons.isEmpty()) { + ConnectionState cs = this.newCons.poll(); + try { + cs.channel.register(selector, SelectionKey.OP_CONNECT | SelectionKey.OP_WRITE | SelectionKey.OP_READ, cs); + } catch (ClosedChannelException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } + } + + if (numKeys == 0) { + checkForTimers(); + continue; + } + + if (quit) + break; + + // fetch events + Set keys = selector.selectedKeys(); + Iterator iter = keys.iterator(); + + // process all events + while (iter.hasNext()) { + + SelectionKey key = iter.next(); + + // remove key from the list + iter.remove(); + + ConnectionState con = (ConnectionState) key.attachment(); + + // ACCEPT A CONNECTION + if (key.isConnectable()) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "is connectable..."); + + ChannelIO client = null; + + // FIXME: Better exception handling! + try { + + client = con.channel; + + if (client.isConnectionPending()) { + client.finishConnect(); + } + + if (!con.sendQ.isEmpty()) { + synchronized (key) { + key.interestOps(SelectionKey.OP_WRITE | SelectionKey.OP_READ); + } + } + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "connected to server " + con.endpoint); + + + con.successfulConnect(); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "Exception while connecting " + ex); + if (con.conRetries < this.MAX_RECONNECT) { + Logging.logMessage(Logging.LEVEL_WARN, this, "cannot contact server...retrying"); + this.reconnect(con); + } else { + Logging.logMessage(Logging.LEVEL_WARN, this, "cannot contact server(" + con.endpoint + ")! 
giving up!"); + cancelRequests(con); + con.connectFailed(); + } + con.conRetries++; + continue; + } + + } + + try { + // INPUT READY + if (key.isReadable()) { + // make sure there is an attachment + if (con != null) { + if (!con.channel.isShutdownInProgress()) { + if (con.channel.doHandshake(key)) { + //System.out.println("read..."); + // Q has space + int numread; // num bytes read from Socket + + //ReusableBuffer rb = BufferPool.allocate(ConnectionState.BUFFSIZE); + + try { + numread = con.channel.read(con.data.getBuffer()); + } catch (IOException ex) { + //BufferPool.free(rb); + // read returns -1 when connection was closed + numread = -1; + } + + if (numread == -1) { + // connection was closed... + Logging.logMessage(Logging.LEVEL_DEBUG, this, "server closed connection!"); + + // cancel the key, i.e. deregister from Selector + key.cancel(); + synchronized (connections) { + connections.remove(con.endpoint).freeBuffers(); + } + + try { + con.channel.close(); + } catch (IOException ex) { + // no one cares! + Logging.logMessage(Logging.LEVEL_DEBUG, this, ex); + } + + if (!con.sendQ.isEmpty()) { + if (con.canReconnect()) { + con.conRetries = 0; + Logging.logMessage(Logging.LEVEL_DEBUG, this, "retry count reset " + con.endpoint); + } + if (con.conRetries < this.MAX_RECONNECT) { + this.reconnect(con); + } else { + Logging.logMessage(Logging.LEVEL_WARN, this, "cannot reconnect to server " + con.endpoint); + // kill requests + cancelRequests(con); + con.connectFailed(); + } + con.conRetries++; + } else { + + // inform client that requests have failed (we + // do not know if we can resend them + // because the server might have received and + // processed them! 
+ cancelRequests(con); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "server closed connection, but it was not needed anymore."); + con.conRetries = 0; + con.channel = null; + } + continue; + + } + + // so there is new data available + if (numread > 0) { + //pThread.enqueueRequest(con,rb); + try { + + // important to find lingering connections + con.active.set(true); + + // prepare buffer for reading + con.data.flip(); + + // the parser may return multiple requests + // because this is async io + + while (con.data.hasRemaining()) { + // as long as there is data we call the + // parser + con.processBuffer(); + + while (true) { + + if (con.receiveQ.isEmpty()) { + break; + } + + SpeedyRequest sr = con.receiveQ.peek(); + if ((sr.status == SpeedyRequest.RequestStatus.FINISHED) || (sr.status == SpeedyRequest.RequestStatus.FAILED)) { + con.receiveQ.poll(); + sr.received = System.currentTimeMillis(); + + // here we should notify + // consumers... + assert (!sr.listenerNotified); + sr.listener.receiveRequest(sr); + sr.listenerNotified = true; + + } else { + break; + } + } + + } + // make buffer ready for reading again + con.data.compact(); + + } catch (SpeedyException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + if (ex.isAbort()) { + try { + con.channel.close(); + synchronized (connections) { + connections.remove(con.endpoint).freeBuffers(); + } + key.cancel(); + } catch (Exception ex2) { + } + + } + cancelRequests(con); + continue; + } + } + } + } + } + } + + // CAN WRITE OUPUT + if (key.isWritable()) { + // we have a request to send out + con.active.set(true); + + if (!con.channel.isShutdownInProgress()) { + if (con.channel.doHandshake(key)) { + boolean rqDone = false; + + do { + rqDone = false; + SpeedyRequest toSend = con.sendQ.peek(); + + if (toSend == null) { + break; + } + // ITERATE as long as a requests finish (buffer is + // not full) + // if we wait for the next select it takes ages and + // the buffer is underfull + if (toSend.status == 
SpeedyRequest.RequestStatus.PENDING) { + toSend.status = SpeedyRequest.RequestStatus.SENDING; + toSend.sendStart = System.currentTimeMillis(); + } + + if (toSend.requestHeaders != null) { + con.channel.write(toSend.requestHeaders.getBuffer()); + if (!toSend.requestHeaders.hasRemaining() && con.channel.isFlushed()) { + BufferPool.free(toSend.requestHeaders); + toSend.requestHeaders = null; + // if there is no body we can skip the next + // round! + if (toSend.requestBody == null) { + rqDone = true; + } else { + toSend.requestBody.position(0); + } + } else { + break; + } + } + if (toSend.requestHeaders == null) { + // headers sent, send body + if (toSend.requestBody == null) { + rqDone = true; + } else { + if (toSend.requestBody.hasRemaining()) { + con.channel.write(toSend.requestBody.getBuffer()); + + } + if (!toSend.requestBody.hasRemaining()) { + BufferPool.free(toSend.requestBody); + toSend.requestBody = null; + rqDone = true; + } else { + break; + } + } + } + + if (rqDone) { + + toSend.status = SpeedyRequest.RequestStatus.WAITING; + con.sendQ.poll(); + con.receiveQ.add(toSend); + + if (con.sendQ.isEmpty()) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "Q empty"); + synchronized (key) { + key.interestOps(SelectionKey.OP_READ); + } + // selector.wakeup(); + } + + } + } while (rqDone); + } + } + con.active.set(true); + continue; + } + } catch (IOException e) { + if (con.channel != null) { + con.channel.close(); + } + Logging.logMessage(Logging.LEVEL_ERROR, this, e); + con.sendQ.poll(); + } + + } + } + + // gracefully shutdown... 
+ try { + synchronized (connections) { + for (InetSocketAddress endpt : connections.keySet()) { + ConnectionState cs = connections.get(endpt); + try { + if (cs.channel != null) { + // TODO: non-blocking shutdown would be better + while (!cs.channel.shutdown(cs.channel.keyFor(selector))) {} + } + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, "Exception when shutdown connection: "+cs.channel.socket().getRemoteSocketAddress()+" "+ex.toString()); + }finally{ + cs.channel.close(); +// closeConnection(cs); + cancelRequests(cs); + } + } + } + selector.close(); + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } + //pThread.shutdown(); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "shutdown complete"); + notifyStopped(); + + } catch (Throwable th) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, th); + notifyCrashed(th instanceof Exception ? (Exception) th + : new Exception(th)); + } + } + + void cancelRequests(final ConnectionState con) { + // kill requests + for (SpeedyRequest rq : con.sendQ) { + rq.status = SpeedyRequest.RequestStatus.FAILED; + assert(!rq.listenerNotified); + rq.listener.receiveRequest(rq); + rq.listenerNotified = true; + rq.freeBuffer(); + } + for (SpeedyRequest rq : con.receiveQ) { + if ((rq.status == SpeedyRequest.RequestStatus.PENDING) + || (rq.status == SpeedyRequest.RequestStatus.SENDING) + || (rq.status == SpeedyRequest.RequestStatus.WAITING)) { + rq.status = SpeedyRequest.RequestStatus.FAILED; + assert(!rq.listenerNotified); + rq.listener.receiveRequest(rq); + rq.listenerNotified = true; + } + rq.freeBuffer(); + } + con.sendQ.clear(); + con.receiveQ.clear(); + + // free the send buffer + //BufferPool.free(con.data); + } + + /** Get current queue loads. 
+ * @returns an array containing the sum of all pending requests and the number of connections + */ + public int[] getQLength() { + synchronized (connections) { + int totalL = 0; + for (ConnectionState cs : this.connections.values()) { + totalL += cs.sendQ.size(); + } + return new int[]{totalL,this.connections.size()}; + } + } + + public String getStatus() { + int[] qs = getQLength(); + String str = "queue length: "+qs[0]+"\n"; + str += "connections: "+qs[1]; + return str; + } + +} diff --git a/servers/src/org/xtreemfs/foundation/speedy/SpeedyException.java b/servers/src/org/xtreemfs/foundation/speedy/SpeedyException.java new file mode 100644 index 0000000000000000000000000000000000000000..14457899d8219b90adc8ed2d934bf5a8ec1cbdf4 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/speedy/SpeedyException.java @@ -0,0 +1,61 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
/**
 * Exception thrown by the Speedy client when processing a request or
 * parsing a response fails. Carries a flag telling the caller whether the
 * underlying connection must be aborted; the Speedy run loop closes the
 * channel and cancels pending requests when {@link #isAbort()} is true.
 *
 * @author bjko
 */
public class SpeedyException extends java.lang.Exception {

    // Exception is Serializable; declare an explicit version id so
    // serialized forms stay stable across recompiles.
    private static final long serialVersionUID = 1L;

    /** if true, the connection on which the error occurred must be closed */
    boolean abortConnection = false;

    /**
     * Creates a new instance of <code>SpeedyException</code> without detail
     * message; does not request a connection abort.
     */
    public SpeedyException() {
    }

    /**
     * Constructs an instance of <code>SpeedyException</code> with the
     * specified detail message; does not request a connection abort.
     *
     * @param msg
     *            the detail message.
     */
    public SpeedyException(String msg) {
        super(msg);
    }

    /**
     * Constructs an instance of <code>SpeedyException</code> with the
     * specified detail message and abort flag.
     *
     * @param msg
     *            the detail message.
     * @param abort
     *            true if the connection must be closed in response to this
     *            error.
     */
    public SpeedyException(String msg, boolean abort) {
        super(msg);
        this.abortConnection = abort;
    }

    /**
     * @return true if the connection on which this error occurred must be
     *         aborted.
     */
    public boolean isAbort() {
        return this.abortConnection;
    }
}
If not, see . +*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.foundation.speedy; + +import java.net.InetSocketAddress; + +import java.security.MessageDigest; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.xtreemfs.common.Request; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.util.OutputUtils; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.mrc.MRCRequest; + +/** + * An HTTP request + * + * @author bjko + */ +public class SpeedyRequest { + + /** + * The request URI + */ + String requestURI; + + /** + * The request HTTP method (GET, PUT...) + */ + String requestMethod; + + /** + * All headers included in the request part + */ + HTTPHeaders requestHttpHeaders; + + /** + * The request header including the request line and separating double CRLF. + */ + ReusableBuffer requestHeaders; + + /** + * The request body, can be null if there is no body data to be sent. + */ + ReusableBuffer requestBody; + + public long sendStart; + + /** + * A listener for a single request. 
This is not directly supported by Speedy + * but might be by clients built using Speedy + */ + public SpeedyResponseListener listener; + + /** + * time in milliseconds when the response was received (for debugging + * purposes only) + */ + public long received; + + // ----- + + /** + * the headers sent by the server as response + */ + public HTTPHeaders responseHeaders; + + /** + * the body the server sent as response + */ + public ReusableBuffer responseBody; + + /** + * status code sent by the server + * + * @see org.xtreemos.wp34.mrc.pinky.HTTPUtils + */ + public int statusCode; + + public boolean listenerNotified; + + /* for debugging only */ + public int _debug_contentLength; + long _id; + + /** + * Status of a request + */ + public enum RequestStatus { + /** + * Request is being processed but not sent yet. + */ + PENDING, + /** + * The request is currently transferred to the server and Speedy is + * waiting for the response. + */ + SENDING, + /** + * The request was sent and Speedy is waiting for a response + */ + WAITING, + /** + * Speedy has received a valid response from the server. The request hs + * reached the end of it's lifecycle. + */ + FINISHED, + /** + * Speedy has received an invalid response from the server or other + * problems occured while executing the request. The request hs reached + * the end of it's lifecycle. 
+ */ + FAILED }; + /** + * the current status of the request + */ + public RequestStatus status; + + public MRCRequest attachment; + + public Object genericAttatchment; + + /** + * original OSD-request (used for subrequests) + */ + private Request originalRequest; + + // ----- + // stuff used by the speedy client + + boolean ready; + + ConnectionState con; + + public int timeout; + + public int waited; + + + /** + * creates a new request + * + * @param requestMethod + * HTTP method + * @param requestURI + * uri to request from server + * @param range + * byte range to request or null + */ + public SpeedyRequest(String requestMethod, String requestURI, String range, String authString) { + this(requestMethod,requestURI,range,authString,null,null,null); + } + + /** + * creates a new request + * + * @param requestMethod + * HTTP method + * @param requestURI + * uri to request from server + * @param range + * byte range to request or null + * @param requestBody + * the body to send as part of the request, or null + * @param type + * the content type + */ + public SpeedyRequest(String requestMethod, String requestURI, String range, String authString, ReusableBuffer requestBody, HTTPUtils.DATA_TYPE type) { + this(requestMethod,requestURI,range,authString,requestBody,type,null); + } + + /** Creates a new request + * @param method Requested method + * @param URI Requested URI + * @param headers Headers for the request + * @autor Jesús Malo (jmalo) + */ + public SpeedyRequest(String method, HTTPHeaders headers, String URI) { + this(method,URI,null,null,null,null,headers); + } + + /** + * creates a new request + * + * @param requestMethod + * HTTP method + * @param requestURI + * uri to request from server + * @param range + * byte range to request or null + * @param requestBody + * the body to send as part of the request, or null + * @param type + * the content type + */ + public SpeedyRequest(String requestMethod, String requestURI, + String range, String authString, + 
ReusableBuffer requestBody, + HTTPUtils.DATA_TYPE type, + HTTPHeaders additionalHeaders) { + + assert(requestMethod != null); + assert(requestURI != null); + + this.requestURI = requestURI; + this.requestMethod = requestMethod; + + //prepare header fields + this.requestHttpHeaders = new HTTPHeaders(); + + if (range != null) + this.requestHttpHeaders.addHeader(HTTPHeaders.HDR_CONTENT_RANGE, range); + + if (authString != null) + this.requestHttpHeaders.addHeader(HTTPHeaders.HDR_AUTHORIZATION, authString); + + if (requestBody != null) { + this.requestHttpHeaders.addHeader(HTTPHeaders.HDR_CONTENT_TYPE, type.toString()); + this.requestHttpHeaders.addHeader(HTTPHeaders.HDR_CONTENT_LENGTH, requestBody.capacity()); + } + + //additional headers can override + if (additionalHeaders != null) + this.requestHttpHeaders.copyFrom(additionalHeaders); + + + this.requestHeaders = constructRequest(); + + this.requestBody = requestBody; + + initializeInternals(); + + _debug_contentLength = (requestBody == null) ? 
0 : requestBody.capacity(); + } + + private void initializeInternals() { + this.responseHeaders = null; + this.responseBody = null; + this.statusCode = HTTPUtils.SC_OKAY; + this.ready = false; + this.listenerNotified = false; + this.timeout = 0; + } + + private ReusableBuffer constructRequest() { + StringBuilder sb = new StringBuilder(); + sb.append(this.requestMethod); + sb.append(" "); + sb.append(this.requestURI); + sb.append(" "); + sb.append(HTTPUtils.HTTP_VER); + sb.append(HTTPUtils.CRLF); + this.requestHttpHeaders.append(sb); + sb.append(HTTPUtils.CRLF); + return ReusableBuffer.wrap(sb.toString().getBytes(HTTPUtils.ENC_ASCII)); + + } + + /** + * Associates a request with a connection + * + * @param con + * connection to associate this request with + */ + public void registerConnection(ConnectionState con) { + this.con = con; + } + + public InetSocketAddress getServer() { + if (this.con != null) { + return this.con.endpoint; + } else { + return null; + } + } + + public void registerListener(SpeedyResponseListener rl) { + this.listener = rl; + } + + /** It provides the byte array of the body + * @return The array of bytes contained in the body of the request or null if there wasn't body in the message + * + * @author Jesús Malo (jmalo) + */ + public byte[] getBody() { + byte body[] = null; + + if (requestBody != null) { + if (requestBody.hasArray()) { + body = requestBody.array(); + } else { + body = new byte[requestBody.capacity()]; + requestBody.position(0); + requestBody.get(body); + } + } + + return body; + } + + /** It provides the byte array of the body + * @return The array of bytes contained in the body of the response or null if there wasn't body in the message + * + * @author Jesús Malo (jmalo) + */ + public byte[] getResponseBody() { + + if (responseBody != null) { + return responseBody.array(); + } + + return null; + } + + public void freeBuffer() { + BufferPool.free(requestBody); + requestBody = null; + BufferPool.free(requestHeaders); + 
requestHeaders = null; + BufferPool.free(responseBody); + responseBody = null; + } + + public void setTimeout(int timeout) { + this.timeout = timeout; + } + + public int getTimeout() { + return timeout; + } + + public String toString() { + String hdrs = (this.requestHeaders == null) ? null : new String(this.requestHeaders.array()); + String bdy = (this.requestBody == null) ? null : new String(this.requestBody.array()); + return this.requestURI+"\n"+ + hdrs+"\n"+ + bdy+"\n"+ + this.responseHeaders+"\n"+ + (this.responseBody != null ? + new String(this.responseBody.array()) : "empty"); + } + + public Request getOriginalRequest() { + return this.originalRequest; + } + + public void setOriginalRequest(Request osdRequest) { + this.originalRequest = osdRequest; + } + + public String getURI() { + return requestURI; + } + + public String getMethod() { + return this.requestMethod; + } + + /** + * Prepares the request for being resent with digest authentication + * @param username + * @param password + */ + public void addDigestAuthentication(String username, String password) { + //reset request & buffers + requestBody.flip(); + assert(requestBody.position() == 0); + initializeInternals(); + + + String serverAuthHdr = responseHeaders.getHeader(HTTPHeaders.HDR_WWWAUTH); + + //parse headers + final String hdrStr = new String(requestHeaders.array()); + + final String credentials = createCredentials(serverAuthHdr,username,password); + + this.requestHttpHeaders.setHeader(HTTPHeaders.HDR_AUTHORIZATION, credentials); + + this.constructRequest(); + } + + private String createCredentials(String authHeader, String authUsername, String authPassword) { + //check header... 
+ if ((authHeader == null) || (authHeader.length() == 0)) + return null; + + try { + System.out.println("header: "+authHeader); + + final String cURI = this.requestURI; + + Pattern p = Pattern.compile("nonce=\\\"(\\S+)\\\""); + Matcher m = p.matcher(authHeader); + m.find(); + final String cNonce = m.group(1); + + + MessageDigest md5 = MessageDigest.getInstance("MD5"); + md5.update((authUsername+":xtreemfs:"+authPassword).getBytes()); + byte[] digest = md5.digest(); + final String HA1 = OutputUtils.byteArrayToHexString(digest).toLowerCase(); + + md5.update((this.requestMethod+":"+cURI).getBytes()); + digest = md5.digest(); + final String HA2 = OutputUtils.byteArrayToHexString(digest).toLowerCase(); + + md5.update((HA1+":"+cNonce+":"+HA2).getBytes()); + digest = md5.digest(); + return OutputUtils.byteArrayToHexString(digest).toLowerCase(); + } catch (Exception ex) { + return null; + } + } + +} diff --git a/servers/src/org/xtreemfs/foundation/speedy/SpeedyResponseListener.java b/servers/src/org/xtreemfs/foundation/speedy/SpeedyResponseListener.java new file mode 100644 index 0000000000000000000000000000000000000000..07efcc3558a486decdb86ae9b9e231acfbe29fa3 --- /dev/null +++ b/servers/src/org/xtreemfs/foundation/speedy/SpeedyResponseListener.java @@ -0,0 +1,43 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
package org.xtreemfs.foundation.speedy;

/**
 * Listener interface for being notified about the outcome of a
 * {@link SpeedyRequest} sent via the Speedy client. The client invokes it
 * once per request, when the request has FINISHED or FAILED.
 *
 * @author bjko
 */
public interface SpeedyResponseListener {

    /**
     * Called when a response was received for the request, or when the
     * request has failed (timeout, connection loss, parse error).
     *
     * @attention This operation blocks the Speedy client thread — keep the
     *            implementation short.
     * @param theRequest
     *            the request whose response (or failure) is now available.
     */
    public void receiveRequest(SpeedyRequest theRequest);

}
package org.xtreemfs.mrc;

import org.xtreemfs.common.logging.Logging;

/**
 * Entry point of the XtreemFS MRC (metadata) server: constructs the
 * {@link RequestController}, starts it up, and installs a JVM shutdown hook
 * for a clean shutdown.
 *
 * @author bjko
 */
public class MRC {

    // controller owning all MRC subsystems; created and started in the constructor
    private RequestController rc;

    /**
     * Creates and starts an MRC instance.
     *
     * @param config
     *            the parsed MRC configuration
     * @param useDirService
     *            NOTE(review): accepted but never read in this class —
     *            confirm whether callers still need it
     */
    public MRC(MRCConfig config, boolean useDirService) {

        Logging
            .logMessage(Logging.LEVEL_INFO, null, "JAVA_HOME="
                + System.getProperty("java.home"));
        Logging.logMessage(Logging.LEVEL_INFO, null, "UUID: " + config.getUUID());

        try {
            rc = new RequestController(config);
            rc.startup();

            // shut the controller down cleanly when the JVM exits
            Runtime.getRuntime().addShutdownHook(new Thread() {
                @Override
                public void run() {
                    try {
                        Logging.logMessage(Logging.LEVEL_INFO, this, "received shutdown signal!");
                        rc.shutdown();
                        Logging.logMessage(Logging.LEVEL_INFO, this, "MRC shutdown complete");
                    } catch (Exception ex) {
                        ex.printStackTrace();
                    }
                }
            });

        } catch (Exception ex) {

            // startup failed: log the failure and roll back a partial startup
            // (note: the summary line is logged at DEBUG, the exception at ERROR)
            Logging.logMessage(Logging.LEVEL_DEBUG, null,
                "System could not start up due to an exception. Aborted.");
            Logging.logMessage(Logging.LEVEL_ERROR, null, ex);

            if (rc != null)
                try {
                    rc.shutdown();
                } catch (Exception e) {
                    Logging.logMessage(Logging.LEVEL_ERROR, config.getUUID(),
                        "could not shutdown MRC: ");
                    Logging.logMessage(Logging.LEVEL_ERROR, config.getUUID(), e);
                }
        }

    }

    /**
     * Main routine.
     *
     * @param args
     *            the command line arguments; args[0] may name the config
     *            file, otherwise a default location is used
     */
    public static void main(String[] args) throws Exception {

        Thread.currentThread().setName("MRC");

        // fall back to the default config file if none was given
        String cfgFile = (args.length > 0) ? args[0] : "../config/mrcconfig.properties";
        MRCConfig config = new MRCConfig(cfgFile);

        Logging.start(config.getDebugLevel());
        new MRC(config, true);
    };

}
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Properties; + +import org.xtreemfs.common.config.ServiceConfig; +import org.xtreemfs.common.uuids.ServiceUUID; + +/** + * + * @author bjko + */ +public class MRCConfig extends ServiceConfig { + + private InetSocketAddress directoryService; + + private ServiceUUID uuid; + + private int localClockRenew; + + private int remoteTimeSync; + + private String dbDir; + + private int osdCheckInterval; + + private String appendLogFileName; + + private boolean noatime; + + private boolean noFsync; + + private int dbCheckpointInterval; + + private int idleIntervalForDBCheckpoint; + + private int logFileSizeForDBCheckpoint; + + private String policyDir; + + private String authenticationProvider; + + private String capabilitySecret; + + /** Creates a new instance of MRCConfig */ + public MRCConfig(String filename) throws IOException { + super(filename); + read(); + } + + public MRCConfig(Properties prop) throws IOException { + super(prop); + read(); + } + + public void read() throws IOException { + super.read(); + + this.osdCheckInterval = this.readRequiredInt("osd_check_interval"); + + this.directoryService = this.readRequiredInetAddr("dir_service.host", "dir_service.port"); + + this.appendLogFileName = this.readRequiredString("database.log"); + + this.dbDir = this.readRequiredString("database.dir"); + + this.noatime = this.readRequiredBoolean("no_atime"); + + this.localClockRenew = this.readRequiredInt("local_clock_renewal"); + + this.remoteTimeSync = this.readRequiredInt("remote_time_sync"); + + this.noFsync = this.readOptionalBoolean("no_fsync", false); + + this.dbCheckpointInterval = this.readRequiredInt("database.checkpoint.interval"); + + this.idleIntervalForDBCheckpoint = this + .readRequiredInt("database.checkpoint.idle_interval"); + + this.logFileSizeForDBCheckpoint = 
this.readRequiredInt("database.checkpoint.logfile_size"); + + this.uuid = new ServiceUUID(this.readRequiredString("uuid")); + + this.policyDir = this.readOptionalString("policy_dir", null); + + this.authenticationProvider = readRequiredString("authentication_provider"); + + this.capabilitySecret = readRequiredString("capability_secret"); + } + + public int getOsdCheckInterval() { + return osdCheckInterval; + } + + public InetSocketAddress getDirectoryService() { + return directoryService; + } + + public String getAppendLogFileName() { + return appendLogFileName; + } + + public String getDbDir() { + return dbDir; + } + + public boolean isNoAtime() { + return noatime; + } + + public int getLocalClockRenew() { + return localClockRenew; + } + + public int getRemoteTimeSync() { + return remoteTimeSync; + } + + public boolean isNoFsync() { + return noFsync; + } + + public int getDBCheckpointInterval() { + return dbCheckpointInterval; + } + + public int getIdleIntervalForDBCheckpoint() { + return idleIntervalForDBCheckpoint; + } + + public int getLogFileSizeForDBCheckpoint() { + return logFileSizeForDBCheckpoint; + } + + public ServiceUUID getUUID() { + return uuid; + } + + public String getPolicyDir() { + return policyDir; + } + + public String getAuthenticationProvider() { + return authenticationProvider; + } + + public String getCapabilitySecret() { + return capabilitySecret; + } + +} diff --git a/servers/src/org/xtreemfs/mrc/MRCRequest.java b/servers/src/org/xtreemfs/mrc/MRCRequest.java new file mode 100644 index 0000000000000000000000000000000000000000..5d4ccc1c433e69f809374a64e63f3bf5bc3c87d0 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/MRCRequest.java @@ -0,0 +1,57 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. 
+ The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc; + +import java.net.InetSocketAddress; + +import org.xtreemfs.common.Request; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.mrc.brain.storage.LogEntry; + +public class MRCRequest extends Request { + + public SpeedyRequest sr; + + public InetSocketAddress srEndpoint; + + public RequestDetails details; + + public boolean syncPseudoRequest; + + public LogEntry logEntry; + + public MRCRequest() { + this(null); + } + + public MRCRequest(PinkyRequest pr) { + super(pr); + sr = null; + details = new RequestDetails(); + logEntry = null; + syncPseudoRequest = false; + } +} diff --git a/servers/src/org/xtreemfs/mrc/PolicyContainer.java b/servers/src/org/xtreemfs/mrc/PolicyContainer.java new file mode 100644 index 0000000000000000000000000000000000000000..fdd46fb87e7d4dbda5a69c393e83c1f6cd4a7a8e --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/PolicyContainer.java @@ -0,0 +1,386 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. 
+ The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ +package org.xtreemfs.mrc; + +import java.io.File; +import java.io.FileFilter; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLClassLoader; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.tools.JavaCompiler; +import javax.tools.JavaFileObject; +import javax.tools.StandardJavaFileManager; +import javax.tools.ToolProvider; + +import org.xtreemfs.common.auth.AuthenticationProvider; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.mrc.ac.FileAccessPolicy; +import org.xtreemfs.mrc.osdselection.OSDSelectionPolicy; +import org.xtreemfs.mrc.slices.SliceManager; + +public class PolicyContainer { + + static class PolicyClassLoader extends ClassLoader { + + private static final Class[] POLICY_INTERFACES = { FileAccessPolicy.class, + OSDSelectionPolicy.class }; + + private Map cache; + + private Map> policyMap; + + private File[] jarFiles; + + private File policyDir; + + public PolicyClassLoader(String policyDirPath) throws IOException { + 
+ this.cache = new HashMap(); + this.policyMap = new HashMap>(); + + if (policyDirPath != null) + policyDir = new File(policyDirPath); + } + + public void init() throws IOException { + + if ((policyDir == null) || (policyDir.exists())) + return; + + // get all JAR files + jarFiles = policyDir.listFiles(new FileFilter() { + public boolean accept(File pathname) { + return pathname.getAbsolutePath().endsWith(".jar"); + } + }); + + // get all Java files recursively + File[] javaFiles = FSUtils.listRecursively(policyDir, new FileFilter() { + public boolean accept(File pathname) { + return pathname.getAbsolutePath().endsWith(".java"); + } + }); + + // compile all Java files + if (javaFiles.length != 0) { + + String cp = System.getProperty("java.class.path") + ":"; + for (int i = 0; i < jarFiles.length; i++) { + cp += jarFiles[i]; + if (i != jarFiles.length - 1) + cp += ":"; + } + + List options = new ArrayList(1); + options.add("-cp"); + options.add(cp); + + JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); + StandardJavaFileManager fileManager = compiler.getStandardFileManager(null, null, + null); + + Iterable compilationUnits = fileManager + .getJavaFileObjectsFromFiles(Arrays.asList(javaFiles)); + if (!compiler.getTask(null, fileManager, null, options, null, compilationUnits) + .call()) + Logging.logMessage(Logging.LEVEL_WARN, this, "some policies in '" + + policyDir.getAbsolutePath() + "' could not be compiled"); + + fileManager.close(); + } + + // retrieve all policies from class files + File[] classFiles = FSUtils.listRecursively(policyDir, new FileFilter() { + public boolean accept(File pathname) { + return pathname.getAbsolutePath().endsWith(".class"); + } + }); + + for (File cls : classFiles) { + try { + + String className = cls.getAbsolutePath().substring( + policyDir.getAbsolutePath().length() + 1, + cls.getAbsolutePath().length() - ".class".length()).replace('/', '.'); + if (cache.containsKey(className)) + continue; + + // load the class + Class 
clazz = loadFromStream(new FileInputStream(cls)); + + // check whether the class refers to a policy; if so, cache + // it + checkClass(clazz); + + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_WARN, this, + "an error occurred while trying to load class from file " + cls); + Logging.logMessage(Logging.LEVEL_WARN, this, exc); + } + } + + // retrieve all policies from JAR files + // for (File jar : jarFiles) { + // + // JarFile arch = new JarFile(jar); + // + // Enumeration entries = arch.entries(); + // while (entries.hasMoreElements()) { + // JarEntry entry = entries.nextElement(); + // if (entry.getName().endsWith(".class")) { + // + // try { + // + // // load the class + // Class clazz = loadFromStream(arch.getInputStream(entry)); + // + // // check whether the class refers to a policy; if + // // so, cache it + // checkClass(clazz); + // + // } catch (IOException exc) { + // Logging.logMessage(Logging.LEVEL_WARN, this, "could not load + // class '" + // + entry.getName() + "' from JAR '" + jar.getAbsolutePath() + + // "'"); + // Logging.logMessage(Logging.LEVEL_WARN, this, exc); + // } catch (LinkageError err) { + // // ignore + // } + // } + // } + // } + + } + + public Class loadClass(String name, boolean resolve) throws ClassNotFoundException { + + // first, check whether the class is cached + if (cache.containsKey(name)) + return cache.get(name); + + // if not cached, try to resolve the class by means of the system + // class loader + try { + return findSystemClass(name); + } catch (ClassNotFoundException exc) { + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "could not find system class '" + + name + "', trying to define the class"); + } + + if (policyDir == null || !policyDir.exists()) + throw new ClassNotFoundException("no built-in policy '" + name + + "' available, and no plug-in policy directory specified"); + + // if it could not be loaded w/ the system class loader, try to + // define it + try { + + File 
classFile = new File(policyDir.getAbsolutePath() + "/" + + name.replace('.', '/') + ".class"); + + Class clazz = loadFromStream(new FileInputStream(classFile)); + + if (resolve) + resolveClass(clazz); + + return clazz; + + } catch (IOException exc) { + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "could not define class '" + name + + "', trying to load the class from a plug-in JAR file"); + + } catch (LinkageError err) { + + Logging.logMessage(Logging.LEVEL_WARN, this, "could not define class '" + name + + "'"); + Logging.logMessage(Logging.LEVEL_WARN, this, err); + + } + + // if the class could not be loaded by the system class loader, try + // to load it from an external JAR file + URL[] urls = new URL[jarFiles.length]; + try { + for (int i = 0; i < jarFiles.length; i++) + urls[i] = jarFiles[i].toURI().toURL(); + } catch (MalformedURLException exc) { + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + } + + return new URLClassLoader(urls).loadClass(name); + } + + public Class loadClass(long id, Class policyInterFace) throws ClassNotFoundException { + + Map map = policyMap.get(policyInterFace); + if (map == null) + throw new ClassNotFoundException(); + + Class clazz = map.get(id); + if (clazz == null) + throw new ClassNotFoundException(); + + return clazz; + } + + private Class loadFromStream(InputStream in) throws IOException { + + // load the binary class content + byte[] classData = new byte[in.available()]; + in.read(classData); + in.close(); + + Class clazz = defineClass(null, classData, 0, classData.length); + cache.put(clazz.getName(), clazz); + + return clazz; + } + + private void checkClass(Class clazz) { + + // check whether the class matches any of the policy + // interfaces + for (Class ifc : POLICY_INTERFACES) { + + if (ifc.isAssignableFrom(clazz)) { + + // get the policy ID + try { + long policyId = clazz.getDeclaredField("POLICY_ID").getLong(null); + + // add the policy to the internal map + Map polIdMap = 
policyMap.get(ifc); + if (polIdMap == null) { + polIdMap = new HashMap(); + policyMap.put(ifc, polIdMap); + } + + if (polIdMap.containsKey(policyId)) + Logging.logMessage(Logging.LEVEL_WARN, this, + "duplicate ID for policy '" + ifc + "':" + policyId); + + polIdMap.put(policyId, clazz); + + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_WARN, this, + "could not load malformed policy '" + clazz + "'"); + Logging.logMessage(Logging.LEVEL_WARN, this, exc); + } + } + } + } + + } + + private final MRCConfig config; + + private PolicyClassLoader policyClassLoader; + + public PolicyContainer(MRCConfig config) throws IOException { + this.config = config; + policyClassLoader = new PolicyClassLoader(config.getPolicyDir()); + policyClassLoader.init(); + } + + public AuthenticationProvider getAuthenticationProvider() throws InstantiationException, + IllegalAccessException, ClassNotFoundException { + + String authPolicy = config.getAuthenticationProvider(); + + // first, check whether a built-in policy exists with the given name + try { + return (AuthenticationProvider) Class.forName(authPolicy).newInstance(); + } catch (Exception exc) { + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "no built-in policy '" + + config.getAuthenticationProvider() + + "' exists, searching for plug-in policies..."); + } + + // if no built-in policy could be found, check for plug-in policy + // directory + + // if the class file could be found, load it + Class cls = policyClassLoader.loadClass(authPolicy); + + return (AuthenticationProvider) cls.newInstance(); + } + + public FileAccessPolicy getFileAccessPolicy(long id, SliceManager sliceMan) throws Exception { + + // load the class + Class policyClass = policyClassLoader.loadClass(id, FileAccessPolicy.class); + + // check whether a default constructor exists; if so, invoke the default + // constructor + try { + return (FileAccessPolicy) policyClass.newInstance(); + } catch (InstantiationException exc) { + // 
ignore + } + + // otherwise, check whether a constructor exists that needs the slice + // manager; if so, invoke it + try { + return (FileAccessPolicy) policyClass + .getConstructor(new Class[] { SliceManager.class }).newInstance(sliceMan); + } catch (InstantiationException exc) { + // ignore + } + + // otherwise, throw an exception indicating that no suitable constructor + // was found + throw new InstantiationException("policy " + policyClass + + " does not have a suitable constructor"); + + } + + public OSDSelectionPolicy getOSDSelectionPolicy(long id) throws Exception { + Class policyClass = policyClassLoader.loadClass(id, OSDSelectionPolicy.class); + return (OSDSelectionPolicy) policyClass.newInstance(); + } + + // public static void main(String[] args) throws Exception { + // + // Logging.start(Logging.LEVEL_DEBUG); + // + // PolicyClassLoader loader = new PolicyClassLoader("/tmp/policies"); + // loader.init(); + // System.out.println(loader.loadClass(3, FileAccessPolicy.class)); + // } +} diff --git a/servers/src/org/xtreemfs/mrc/QMonitor.java b/servers/src/org/xtreemfs/mrc/QMonitor.java new file mode 100644 index 0000000000000000000000000000000000000000..76d526a9d2cb4d31f65b30b1d85e0e843fd7c905 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/QMonitor.java @@ -0,0 +1,220 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc; + +import java.io.IOException; +import java.io.PrintWriter; +import java.util.TimerTask; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.pinky.PipelinedPinky; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.mrc.brain.BrainStage; +import org.xtreemfs.mrc.brain.storage.DiskLogger; +import org.xtreemfs.mrc.replication.ReplicationManager; + +/** + * Monitors all queues and reduces or shuts down client queues if stage queues + * get too full. + * + * @author bjko + */ +public class QMonitor extends TimerTask { + + public static final String logfile = "qmonitor.log"; + + public static PrintWriter log; + + final PipelinedPinky pinkyStage; + + final BrainStage brainStage; + + final MultiSpeedy speedyStage; + + final DiskLogger loggerStage; + + final ReplicationManager replicationStage; + + public enum QState { + GREEN, YELLOW, ORANGE, RED + }; + + private QState state; + + public static final int CLIENT_MAXQ_GREEN = 400; + + public static final int CLIENT_MAXQ_YELLOW = 50; + + public static final int CLIENT_MAXQ_ORANGE = 10; + + public static final int CLIENT_MAXQ_RED = 0; + + public static final int BRAINQ_THR_GREEN_YELLOW = 500; + + public static final int BRAINQ_THR_YELLOW_GREEN = 200; + + public static final int BRAINQ_THR_YELLOW_ORANGE = 1000; + + public static final int BRAINQ_THR_ORANGE_YELLOW = 400; + + public static final int BRAINQ_THR_ORANGE_RED = 2000; + + public static final int BRAINQ_THR_RED_ORANGE = 800; + + /** Creates a new instance of QMonitor */ + public 
QMonitor(PipelinedPinky pp, BrainStage bs, MultiSpeedy ms, + DiskLogger dl, ReplicationManager rm) throws IOException { + super(); + state = QState.GREEN; + // log = new PrintWriter(new File(logfile)); + pinkyStage = pp; + brainStage = bs; + speedyStage = ms; + loggerStage = dl; + replicationStage = rm; + pinkyStage.MAX_CLIENT_QUEUE = CLIENT_MAXQ_GREEN; + pinkyStage.CLIENT_Q_THR = CLIENT_MAXQ_GREEN / 2; + } + + public void run() { + + /* + * first we try to find the longes Q + */ + int maxQlen = brainStage.getQLength(); + if (maxQlen < loggerStage.getQLength()) { + maxQlen = loggerStage.getQLength(); + } + if (maxQlen < replicationStage.getQLength()) { + maxQlen = replicationStage.getQLength(); + } + // we want the total length + if (maxQlen < speedyStage.getQLength()[0]) { + maxQlen = replicationStage.getQLength(); + } + + boolean stateChanged = false; + + if ((state == QState.GREEN) && (maxQlen > BRAINQ_THR_GREEN_YELLOW)) { + state = QState.YELLOW; + stateChanged = true; + + } + if ((state == QState.YELLOW) && (maxQlen > BRAINQ_THR_YELLOW_ORANGE)) { + state = QState.ORANGE; + stateChanged = true; + } + if ((state == QState.ORANGE) && (maxQlen > BRAINQ_THR_ORANGE_RED)) { + state = QState.RED; + stateChanged = true; + } + + boolean wasCodeRed = false; + + if ((state == QState.RED) && (maxQlen < BRAINQ_THR_RED_ORANGE)) { + // back from code red + wasCodeRed = true; + state = QState.ORANGE; + stateChanged = true; + } + if ((state == QState.ORANGE) && (maxQlen < BRAINQ_THR_ORANGE_YELLOW)) { + state = QState.YELLOW; + stateChanged = true; + } + if ((state == QState.YELLOW) && (maxQlen < BRAINQ_THR_YELLOW_GREEN)) { + state = QState.GREEN; + stateChanged = true; + + } + + if (stateChanged) { + // log.println("state now "+state); + } + + if (state == QState.GREEN) { + pinkyStage.MAX_CLIENT_QUEUE = CLIENT_MAXQ_GREEN; + pinkyStage.CLIENT_Q_THR = CLIENT_MAXQ_GREEN / 2; + } else if (state == QState.YELLOW) { + pinkyStage.MAX_CLIENT_QUEUE = CLIENT_MAXQ_YELLOW; + 
pinkyStage.CLIENT_Q_THR = CLIENT_MAXQ_YELLOW / 2; + } else if (state == QState.ORANGE) { + pinkyStage.MAX_CLIENT_QUEUE = CLIENT_MAXQ_ORANGE; + pinkyStage.CLIENT_Q_THR = CLIENT_MAXQ_ORANGE / 2; + } else if (state == QState.RED) { + pinkyStage.MAX_CLIENT_QUEUE = 0; + pinkyStage.CLIENT_Q_THR = 0; + } + if (wasCodeRed) { + // make all channels readable again... + Logging.logMessage(Logging.LEVEL_INFO, this, "resume reading"); + pinkyStage.restartReading(); + } + + // if (stateChanged) { + // log.println(String.format("Pinky #clients: %5d MaxClientQ: %5d QThr: + // %5d Total: %5d", + // pinkyStage.getNumConnections(),pinkyStage.MAX_CLIENT_QUEUE, + // pinkyStage.CLIENT_Q_THR,pinkyStage.getTotalQLength())); + // int[] sq = speedyStage.getQLength(); + // log.println(String.format("Speedy NumServers: %5d SumQ: + // %5d",sq[1],sq[0])); + // log.println(String.format("Brain Q: %5d",brainStage.getQLength())); + // log.println(String.format("Logger Q: %5d",loggerStage.getQLength())); + // log.println(String.format("Replicat. 
Q: + // %5d",replicationStage.getQLength())); + // log.println(); + // log.flush(); + // } + } + + public QState getState() { + return state; + } + + public int[] getSpeedyQueueLength() { + return speedyStage.getQLength(); + } + + public int getPinkyConnections() { + return pinkyStage.getNumConnections(); + } + + public int getBrainQueueLength() { + return brainStage.getQLength(); + } + + public int getLoggerQueueLength() { + return loggerStage.getQLength(); + } + + public int getReplicationQueueLength() { + return replicationStage.getQLength(); + } + + public boolean cancel() { + log.close(); + return true; + } +} diff --git a/servers/src/org/xtreemfs/mrc/RequestController.java b/servers/src/org/xtreemfs/mrc/RequestController.java new file mode 100644 index 0000000000000000000000000000000000000000..f9eec7597db3f70228381fbbeac7c713a54d732a --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/RequestController.java @@ -0,0 +1,852 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileWriter; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.management.OperatingSystemMXBean; +import java.net.InetAddress; +import java.nio.CharBuffer; +import java.nio.channels.FileChannel; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.atomic.AtomicInteger; + +import org.xtreemfs.common.HeartbeatThread; +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.VersionManagement; +import org.xtreemfs.common.HeartbeatThread.ServiceDataGenerator; +import org.xtreemfs.common.auth.AuthenticationProvider; +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.clients.mrc.MRCClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.OutputUtils; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.common.uuids.UUIDResolver; +import org.xtreemfs.foundation.LifeCycleListener; +import org.xtreemfs.foundation.json.JSONCharBufferString; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.pinky.PinkyRequestListener; +import org.xtreemfs.foundation.pinky.PipelinedPinky; +import org.xtreemfs.foundation.pinky.SSLOptions; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import 
org.xtreemfs.foundation.speedy.SpeedyResponseListener; +import org.xtreemfs.mrc.brain.BrainHelper; +import org.xtreemfs.mrc.brain.BrainRequestListener; +import org.xtreemfs.mrc.brain.BrainStage; +import org.xtreemfs.mrc.brain.storage.DiskLogger; +import org.xtreemfs.mrc.brain.storage.InvalidLogEntryException; +import org.xtreemfs.mrc.brain.storage.LogEntry; +import org.xtreemfs.mrc.brain.storage.SyncListener; +import org.xtreemfs.mrc.osdselection.OSDStatusManager; +import org.xtreemfs.mrc.replication.ReplicationManager; +import org.xtreemfs.mrc.replication.ReplicationRequestListener; +import org.xtreemfs.mrc.slices.SliceInfo; +import org.xtreemfs.mrc.slices.SliceManager; +import org.xtreemfs.mrc.slices.VolumeInfo; +import org.xtreemfs.mrc.utils.MessageUtils; + +/** + * This class comtains the workflow of the MRC server and directs the requestst + * to the appropriate stages + * + * @author bjko + */ +public class RequestController implements SpeedyResponseListener, PinkyRequestListener, + SyncListener, BrainRequestListener, ReplicationRequestListener, LifeCycleListener { + + private static final String CMD_SHUTDOWN = ".shutdown"; + + private static final String CMD_DBDUMP = ".dumpdb"; + + private static final String CMD_DBRESTORE = ".restoredb"; + + private final PipelinedPinky pinkyStage; + + private final BrainStage brainStage; + + private final MultiSpeedy speedyStage; + + private final DiskLogger loggerStage; + + private final ReplicationManager replicationStage; + + private final MRCConfig config; + + private final OSDStatusManager osdMonitor; + + private final SliceManager slices; + + private final Timer qTimer; + + private final Timer cpTimer; + + private final String authString; + + private final DIRClient dirClient; + + private final MRCClient mrcClient; + + private final QMonitor qMon; + + private final AtomicInteger dbgId = new AtomicInteger(1); + + private long lastRequestTimeStamp; + + private final PolicyContainer policyContainer; + + private final 
HeartbeatThread heartbeatThread; + + /** Creates a new instance of RequestController */ + public RequestController(final MRCConfig config) throws Exception { + + try { + + this.config = config; + + // generate an authorization string for Directory Service operations + authString = NullAuthProvider.createAuthString(config.getUUID().toString(), config + .getUUID().toString()); + + policyContainer = new PolicyContainer(config); + + final AuthenticationProvider authProvider = policyContainer.getAuthenticationProvider(); + authProvider.initialize(config.isUsingSSL()); + if (Logging.isInfo()) + Logging.logMessage(Logging.LEVEL_INFO, this, "using authentication provider '" + + authProvider.getClass().getName() + "'"); + + qTimer = new Timer(); + cpTimer = new Timer(); + + File dbDir = new File(config.getDbDir()); + if (!dbDir.exists()) + dbDir.mkdirs(); + + speedyStage = config.isUsingSSL() ? new MultiSpeedy(new SSLOptions(config + .getServiceCredsFile(), config.getServiceCredsPassphrase(), config + .getServiceCredsContainer(), config.getTrustedCertsFile(), config + .getTrustedCertsPassphrase(), config.getTrustedCertsContainer(), false)) + : new MultiSpeedy(); + speedyStage.registerSingleListener(this); + speedyStage.setLifeCycleListener(this); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "use SSL=" + config.isUsingSSL()); + + pinkyStage = config.isUsingSSL() ? 
new PipelinedPinky(config.getPort(), config + .getAddress(), this, new SSLOptions(config.getServiceCredsFile(), config + .getServiceCredsPassphrase(), config.getServiceCredsContainer(), config + .getTrustedCertsFile(), config.getTrustedCertsPassphrase(), config + .getTrustedCertsContainer(), false)) : new PipelinedPinky(config.getPort(), + config.getAddress(), this); + pinkyStage.setLifeCycleListener(this); + + dirClient = new DIRClient(speedyStage, config.getDirectoryService()); + + TimeSync.initialize(dirClient, config.getRemoteTimeSync(), config.getLocalClockRenew(), + authString); + + UUIDResolver.start(dirClient, 10 * 1000, 600 * 1000); + UUIDResolver.addLocalMapping(config.getUUID(), config.getPort(), config.isUsingSSL()); + + mrcClient = new MRCClient(speedyStage); + + osdMonitor = new OSDStatusManager(config, dirClient, policyContainer, authString); + osdMonitor.setLifeCycleListener(this); + + slices = new SliceManager(config); + slices.init(); + try { + // cross stage listeners + slices.addVolumeChangeListener(osdMonitor); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } + + loggerStage = new DiskLogger(config.getAppendLogFileName(), config.isNoFsync()); + loggerStage.setLifeCycleListener(this); + + replicationStage = new ReplicationManager(config, mrcClient, loggerStage, slices); + replicationStage.registerBrainListener(this); + replicationStage.registerReplicationListener(this); + replicationStage.setLifeCycleListener(this); + + brainStage = new BrainStage(config, dirClient, osdMonitor, slices, policyContainer, + authProvider, authString); + brainStage.setRequestListener(this); + brainStage.setLifeCycleListener(this); + + ServiceDataGenerator gen = new ServiceDataGenerator() { + public Map> getServiceData() { + + String uuid = RequestController.this.config.getUUID().toString(); + + OperatingSystemMXBean osb = ManagementFactory.getOperatingSystemMXBean(); + String load = String.valueOf((int) (osb.getSystemLoadAverage() * 
100 / osb + .getAvailableProcessors())); + + long totalRAM = Runtime.getRuntime().maxMemory(); + long usedRAM = Runtime.getRuntime().totalMemory()-Runtime.getRuntime().freeMemory(); + + // get service data + Map> map = new HashMap>(); + map.put(uuid, RPCClient.generateMap("type", "MRC", "load", load, + "prot_versions", VersionManagement.getSupportedProtVersAsString(), + "totalRAM", Long.toString(totalRAM), + "usedRAM", Long.toString(usedRAM), + "geoCoordinates",config.getGeoCoordinates())); + + // get volume data + for (VolumeInfo vol : slices.getVolumes()) { + if (!vol.isRegisterAtDS()) + continue; + Map dsVolumeInfo = BrainHelper.createDSVolumeInfo(vol, + osdMonitor, uuid); + map.put(vol.getId(), dsVolumeInfo); + } + + return map; + } + }; + + heartbeatThread = new HeartbeatThread("MRC Heartbeat Thread", dirClient, config + .getUUID(), gen, authString,config); + + replicationStage.setBrainStage(brainStage); + + qMon = new QMonitor(pinkyStage, brainStage, speedyStage, loggerStage, replicationStage); + + // recover database / replay log if necessary + brainStage.restoreDB(); + replayLog(); + + } catch (Exception exc) { + exc.printStackTrace(); + shutdown(); + throw exc; + } + } + + public void startup() throws Exception { + + try { + speedyStage.start(); + speedyStage.waitForStartup(); + + osdMonitor.start(); + replicationStage.start(); + replicationStage.init(); + brainStage.start(); + loggerStage.start(); + pinkyStage.start(); + heartbeatThread.start(); + + osdMonitor.waitForStartup(); + brainStage.waitForStartup(); + loggerStage.waitForStartup(); + pinkyStage.waitForStartup(); + replicationStage.waitForStartup(); + heartbeatThread.waitForStartup(); + + qTimer.scheduleAtFixedRate(qMon, 0, 1000); + + lastRequestTimeStamp = TimeSync.getGlobalTime(); + TimerTask cpTask = new TimerTask() { + public void run() { + if (TimeSync.getGlobalTime() - lastRequestTimeStamp > config + .getIdleIntervalForDBCheckpoint() + && loggerStage.getLogFileSize() > 
config.getLogFileSizeForDBCheckpoint()) + + try { + checkpoint(); + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + } + } + }; + + cpTimer.scheduleAtFixedRate(cpTask, config.getDBCheckpointInterval(), config + .getDBCheckpointInterval()); + + Logging.logMessage(Logging.LEVEL_INFO, this, "operational on port " + config.getPort()); + + } catch (Exception exc) { + // shutdown(); + throw exc; + } + + } + + /** + * Creates a database checkpoint. + */ + public void checkpoint() { + + // FIXME: block all incoming requests and make sure + // that no requests are being processed while + // checkpointing! The current approach is a + // workaround, not a valid solution!!! + + try { + brainStage.block(); // block new requests during CP + brainStage.checkpointDB(); // checkpoint the DB + loggerStage.cleanLog(); // clean the DB log + brainStage.completeDBCheckpoint(); // complete CP + brainStage.unblock(); // process incoming requests + + } catch (Exception e) { + Logging.logMessage(Logging.LEVEL_ERROR, this, "CANNOT CREATE DATABASE CHECKPOINT!"); + Logging.logMessage(Logging.LEVEL_ERROR, this, e); + } + } + + public void replayLog() throws IOException { + File lf = new File(config.getAppendLogFileName()); + + if (lf.exists()) { + + int numOps = 0; + Logging.logMessage(Logging.LEVEL_INFO, this, + "there is an old log file. 
Starting log replay.."); + + FileInputStream fis = new FileInputStream(lf); + FileChannel fc = fis.getChannel(); + + boolean replaySuccess = true; + + while (fc.position() < fc.size()) { + + LogEntry l = null; + try { + // unmarshall log entry from disk log + l = new LogEntry(fc); + // parse the log entry contents + Object args = null; + if (l.payload != null) { + // parse JSONrequest.pr.requestBody.position(0); + CharBuffer utf8buf = HTTPUtils.ENC_UTF8.decode(l.payload.getBuffer()); + args = JSONParser.parseJSON(new JSONCharBufferString(utf8buf)); + } + + // this one works sync + brainStage.replayLogEntry(l.operationName, l.userID, l.groupID, args); + + SliceInfo info = slices.getSliceInfo(l.slID); + assert (info != null); + + if (info.isDeleted()) + slices.removeSliceFromIndex(l.slID); + else + info.setNextSequenceID(l.sequenceID + 1); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "replayed operation " + + l.operationName); + numOps++; + + } catch (JSONException ex) { + Logging.logMessage(Logging.LEVEL_WARN, this, + "log entry with invalid JSON message encountered: " + ex); + } catch (InvalidLogEntryException ex) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "encountered corrupted entry in logfile!"); + if (fc.size() - (fc.position() + ex.getLength()) > 0) { + Logging.logMessage(Logging.LEVEL_ERROR, this, "DON'T PANIC"); + Logging.logMessage(Logging.LEVEL_ERROR, this, + "FOUND A CORRUPTED ENTRY IN THE LOG FILE WHICH IS NOT THE LAST ONE!!!"); + Logging.logMessage(Logging.LEVEL_ERROR, this, + "CANNOT START UP SERVICE, PLEASE CHECK LOGFILE"); + System.exit(1); + } + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, "DON'T PANIC"); + Logging.logMessage(Logging.LEVEL_ERROR, this, + "CANNOT START UP SERVICE, PLEASE CHECK LOGFILE"); + Logging + .logMessage(Logging.LEVEL_ERROR, this, "cannot execute log entry: " + + ex); + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + System.exit(1); + } + } + + 
Logging.logMessage(Logging.LEVEL_INFO, this, "replayed " + numOps + " operations."); + + if (replaySuccess && (numOps > 0)) { + // make a checkpoint to keep the log short for the next crash + // ;-) + this.checkpoint(); + } + } + + } + + public void truncateLog() throws IOException { + // TODO: check whether it is necessary to stop and restart the logger + // thread + if (loggerStage != null) + loggerStage.cleanLog(); + + // if it is null we are in the startup phase and do not need to truncate + // the log + } + + public void shutdown() throws Exception { + + // create status page snapshot for debugging purposes + try { + String statusPageSnapshot = getStatusPage(); + BufferedWriter writer = new BufferedWriter(new FileWriter(config.getDbDir() + + "/.status.html")); + writer.write(statusPageSnapshot); + writer.close(); + } catch (Exception exc) { + // ignore + } + + if (slices != null && loggerStage != null + && loggerStage.getLogFileSize() > config.getLogFileSizeForDBCheckpoint()) + checkpoint(); + + if (heartbeatThread != null) + heartbeatThread.shutdown(); + if (qTimer != null) + qTimer.cancel(); + if (cpTimer != null) + cpTimer.cancel(); + if (pinkyStage != null) + pinkyStage.shutdown(); + if (osdMonitor != null) + osdMonitor.shutdown(); + if (brainStage != null) + brainStage.shutdown(); + if (loggerStage != null) + loggerStage.shutdown(); + if (speedyStage != null) + speedyStage.shutdown(); + if (replicationStage != null) + replicationStage.shutdown(); + + if (heartbeatThread != null && heartbeatThread != Thread.currentThread()) + heartbeatThread.waitForShutdown(); + if (pinkyStage != null && pinkyStage != Thread.currentThread()) + pinkyStage.waitForShutdown(); + if (osdMonitor != null && osdMonitor != Thread.currentThread()) + osdMonitor.waitForShutdown(); + if (brainStage != null && brainStage != Thread.currentThread()) + brainStage.waitForShutdown(); + if (loggerStage != null && loggerStage != Thread.currentThread()) + loggerStage.waitForShutdown(); + if 
(speedyStage != null && speedyStage != Thread.currentThread()) + speedyStage.waitForShutdown(); + if (replicationStage != null && replicationStage != Thread.currentThread()) + replicationStage.waitForShutdown(); + + } + + /** + * This operation KILLS all threads immediately. + * + * @attention EXTRA EVIL OPERATION! FOR TESTING PURPOSES ONLY! DO NOT USE + * OTHERWISE! + */ + public void dropDead() throws Exception { + heartbeatThread.shutdown(); + qTimer.cancel(); + cpTimer.cancel(); + pinkyStage.shutdown(); + osdMonitor.shutdown(); + brainStage.shutdown(); + loggerStage.shutdown(); + speedyStage.shutdown(); + replicationStage.shutdown(); + } + + // --------------------- LISTENERS ------------------------------ + + public void receiveRequest(SpeedyRequest theRequest) { + // all SpeedyRequests come from the brain + // hence, we give requeue it w/ brain + // TODO: handle theRequest.attachment over to brain + assert (theRequest.attachment != null); + brainStage.processRequest(theRequest.attachment); + } + + /** + * Here starts the action of the RequestController + */ + public void receiveRequest(PinkyRequest theRequest) { + MRCRequest rq = new MRCRequest(theRequest); + theRequest.debugRqId = dbgId.getAndIncrement(); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "received request #" + theRequest.debugRqId); + try { + + if (theRequest.requestURI.charAt(0) == '/') { + + if (theRequest.requestURI.length() == 1) { + + // generate status HTTP page + String statusPage = getStatusPage(); + + ReusableBuffer bbuf = ReusableBuffer.wrap(statusPage + .getBytes(HTTPUtils.ENC_ASCII)); + theRequest.setResponse(HTTPUtils.SC_OKAY, bbuf, HTTPUtils.DATA_TYPE.HTML); + pinkyStage.sendResponse(rq.getPinkyRequest()); + return; + + } else + // process normal request + theRequest.requestURI = theRequest.requestURI.substring(1); + } + + if (theRequest.requestURI.length() > 0) { + if (theRequest.requestURI.charAt(0) == '.') { + // system command + handleSystemCall(rq); + } else { + try { + // 
everything else goes directly to the brain + + // handle over tosp the brainStage + brainStage.processRequest(rq); + } catch (IllegalStateException e) { + // brain's queue is full + theRequest.setClose(true); + theRequest.setResponse(HTTPUtils.SC_SERV_UNAVAIL); + pinkyStage.sendResponse(theRequest); + } + + } + } else { + theRequest.setClose(true); + theRequest.setResponse(HTTPUtils.SC_BAD_REQUEST); + pinkyStage.sendResponse(theRequest); + } + + } catch (IndexOutOfBoundsException e) { + theRequest.setClose(true); + theRequest.setResponse(HTTPUtils.SC_BAD_REQUEST); + pinkyStage.sendResponse(theRequest); + } catch (Exception exc) { + theRequest.setClose(true); + theRequest.setResponse(HTTPUtils.SC_SERVER_ERROR); + pinkyStage.sendResponse(theRequest); + } + } + + public void handleSystemCall(MRCRequest rq) { + try { + if (rq.getPinkyRequest().requestURI.startsWith(".R")) { + replicationStage.addRequest(rq); + } else if (rq.getPinkyRequest().requestURI.equals(CMD_SHUTDOWN)) { + // shutdown the whole MRC!!! 
+ rq.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY); + pinkyStage.sendResponse(rq.getPinkyRequest()); + shutdown(); + } else if (rq.getPinkyRequest().requestURI.equals(CMD_DBDUMP)) { + // dump database to file + try { + List args = (List) MessageUtils.unmarshallRequest(rq); + dumpDB((String) args.get(0)); + rq.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY); + } catch (Exception exc) { + rq.getPinkyRequest().setResponse(HTTPUtils.SC_USER_EXCEPTION, "could not create dump file"); + } + + pinkyStage.sendResponse(rq.getPinkyRequest()); + } else if (rq.getPinkyRequest().requestURI.equals(CMD_DBRESTORE)) { + // dump database to file + try { + List args = (List) MessageUtils.unmarshallRequest(rq); + restoreDBFromDump((String) args.get(0)); + rq.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY); + } catch (Exception exc) { + rq.getPinkyRequest().setResponse(HTTPUtils.SC_USER_EXCEPTION, "could not process dump file: " + + OutputUtils.stackTraceToString(exc)); + } + + pinkyStage.sendResponse(rq.getPinkyRequest()); + } else { + rq.getPinkyRequest().setResponse(HTTPUtils.SC_NOT_IMPLEMENTED); + pinkyStage.sendResponse(rq.getPinkyRequest()); + } + + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } + } + + public void brainRequestDone(MRCRequest rq) { + + lastRequestTimeStamp = TimeSync.getGlobalTime(); + + if (rq.getPinkyRequest() == null) { + if (rq.sr != null) { + try { + speedyStage.sendRequest(rq.sr, rq.srEndpoint); + } catch (Exception ex) { + rq.getPinkyRequest().setResponse(HTTPUtils.SC_SERVER_ERROR); + pinkyStage.sendResponse(rq.getPinkyRequest()); + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } + } + } else { + if (rq.getPinkyRequest().isReady()) { + // we have a response we can pass on to the disk logger + // or send a response to the client + if (rq.details.persistentOperation) { + SliceInfo info = slices.getSliceInfo(rq.details.sliceId); + + 
assert (info != null); + LogEntry e = new LogEntry(info.getCurrentViewID(), info.getNextSequenceID(), + rq.details.sliceId, DiskLogger.OPTYPE_MRC, rq.getPinkyRequest().requestURI, rq.details.userId, rq.details.groupIds + .get(0), rq.getPinkyRequest().requestBody.createViewBuffer(), rq); + e.registerListener(this); + assert (rq.logEntry == null); + rq.logEntry = e; + loggerStage.append(e); + assert (e.payload == rq.logEntry.payload); + } else { + // direct response! + pinkyStage.sendResponse(rq.getPinkyRequest()); + } + } else { + // if there is a speedy request we have to send it out + if (rq.sr != null) { + try { + rq.sr.attachment = rq; + speedyStage.sendRequest(rq.sr, rq.srEndpoint); + } catch (Exception ex) { + rq.details.persistentOperation = false; + rq.getPinkyRequest().setResponse(HTTPUtils.SC_SERVER_ERROR); + pinkyStage.sendResponse(rq.getPinkyRequest()); + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } + } else { + throw new RuntimeException( + "[ E | RequestController ] Brain must send either a response or a SpeedyRequest, but sr is null!"); + } + } + } + } + + /** + * called by the loggerStage after the Entry was written to disk + */ + public void synced(LogEntry entry) { + + MRCRequest rq = entry.attachment; + assert (entry == rq.logEntry); + + // initiate replication + if (replicationStage.replicate(rq)) { + // okay! 
send response to client + // otherwise we have to wait for + // the replication to finish + + // check if a deferred deletion of the slice is necessary + if (rq.details.sliceId != null && slices.getSliceInfo(rq.details.sliceId).isDeleted()) + slices.removeSliceFromIndex(rq.details.sliceId); + + pinkyStage.sendResponse(rq.getPinkyRequest()); + } + } + + public void failed(LogEntry entry, Exception ex) { + MRCRequest rq = entry.attachment; + BufferPool.free(entry.payload); + // all we can do is send a 500 to the client + Logging.logMessage(Logging.LEVEL_ERROR, this, "write to disk log failed"); + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + rq.getPinkyRequest().setResponse(500); + rq.details.persistentOperation = false; + pinkyStage.sendResponse(rq.getPinkyRequest()); + } + + public void replicationDone(MRCRequest rq) { + BufferPool.free(rq.logEntry.payload); + + // check if a deferred deletion of the slice is necessary + if (rq.details.sliceId != null && slices.getSliceInfo(rq.details.sliceId).isDeleted()) + slices.removeSliceFromIndex(rq.details.sliceId); + + pinkyStage.sendResponse(rq.getPinkyRequest()); + } + + public void dumpDB(String dumpFilePath) throws Exception { + brainStage.dumpDB(dumpFilePath); + } + + public void restoreDBFromDump(String dumpFilePath) throws Exception { + brainStage.restoreDBFromDump(dumpFilePath); + } + + public void crashPerformed() { + try { + shutdown(); + } catch (Exception e) { + Logging.logMessage(Logging.LEVEL_ERROR, this, e); + } + } + + public void shutdownPerformed() { + // ignore + } + + public void startupPerformed() { + // ignore + } + + public String getStatusPage() throws Exception { + + int pinkyQL = pinkyStage.getTotalQLength(); + int pinkyCons = pinkyStage.getNumConnections(); + String me = "http://" + InetAddress.getLocalHost().getHostName() + ":" + config.getPort(); + String dirServiceURL = "http://" + config.getDirectoryService().getHostName() + ":" + + config.getDirectoryService().getPort(); + + 
StringBuffer rqTableBuf = new StringBuffer(); + long totalRequests = 0; + for (String req : brainStage._statMap.keySet()) { + + long count = brainStage._statMap.get(req); + totalRequests += count; + + rqTableBuf.append("'"); + rqTableBuf.append(req); + rqTableBuf.append("'"); + rqTableBuf.append(count); + rqTableBuf.append(""); + } + + StringBuffer volTableBuf = new StringBuffer(); + List vols = slices.getVolumes(); + for (VolumeInfo v : vols) { + + Map> osdList = osdMonitor.getUsableOSDs(v.getId()); + + volTableBuf.append(""); + volTableBuf.append(v.getName()); + volTableBuf + .append("
selectable OSDs"); + Iterator it = osdList.keySet().iterator(); + while (it.hasNext()) { + final ServiceUUID osdUUID = new ServiceUUID(it.next()); + volTableBuf.append(""); + volTableBuf.append(osdUUID); + volTableBuf.append(""); + if (it.hasNext()) + volTableBuf.append(", "); + } + volTableBuf.append("
striping policy"); + volTableBuf.append(slices.getSliceDB(v.getId(), 1, 'r').getVolumeStripingPolicy()); + volTableBuf.append("
access policy"); + volTableBuf.append(v.getAcPolicyId()); + volTableBuf.append("
osd policy"); + volTableBuf.append(v.getOsdPolicyId()); + volTableBuf.append("
partitioning policy"); + volTableBuf.append(v.getPartitioningPolicyId()); + volTableBuf.append("
"); + } + + String status = "XtreemFS MRC " + + me + + "" + + "" + + "" + + "" + + "" + + "

MRC " + + me + + "


" + + "" + + "" + "" + + "" + "" + + "" + + "" + + rqTableBuf + "" + volTableBuf + + "" + + "" + + "" + + "" + "" + + "" + + "" + + "" + + "" + + "" + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "
Configuration
port" + + config.getPort() + + "
Directory Service" + dirServiceURL + + "
debug level" + config.getDebugLevel() + + "
Load
Pinky #connections" + pinkyCons + + "
Pinky total requests in queue" + pinkyQL + + "
last request received" + + lastRequestTimeStamp + " (" + new Date(lastRequestTimeStamp) + ")" + "
Operation Statistics
total requests" + totalRequests + "
Volumes
Database Statistics
total database size" + + OutputUtils.formatBytes(brainStage.getTotalDBSize()) + "
database log size" + + OutputUtils.formatBytes(loggerStage.getLogFileSize()) + "
total #files" + brainStage.getTotalNumberOfFiles() + + "
total #directories" + + brainStage.getTotalNumberOfDirs() + "
VM Info / Memory
Memory free" + + OutputUtils.formatBytes(Runtime.getRuntime().freeMemory()) + "
Memory total" + + OutputUtils.formatBytes(Runtime.getRuntime().totalMemory()) + "
Memory max" + + OutputUtils.formatBytes(Runtime.getRuntime().maxMemory()) + "
BufferPool stats
" + BufferPool.getStatus()
+            + "
avail. processors" + + Runtime.getRuntime().availableProcessors() + "
Time
global xtreemfs time" + TimeSync.getGlobalTime() + + " (" + new Date(TimeSync.getGlobalTime()) + ")" + "
global time sync interval" + + config.getRemoteTimeSync() + " ms" + "
local time" + TimeSync.getLocalSystemTime() + " (" + + new Date(TimeSync.getLocalSystemTime()) + ")" + "
local time granularity" + + TimeSync.getLocalRenewInterval() + " ms
UUID Mapping Cache
" + UUIDResolver.getCache() + "
"; + + return status; + } + +} diff --git a/servers/src/org/xtreemfs/mrc/RequestDetails.java b/servers/src/org/xtreemfs/mrc/RequestDetails.java new file mode 100644 index 0000000000000000000000000000000000000000..3758ccb99a68543d2dd5b7c0014faaa94c0a0045 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/RequestDetails.java @@ -0,0 +1,63 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Christian Lorenz (ZIB), Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ +package org.xtreemfs.mrc; + +import java.util.List; +import java.util.Map; + +import org.xtreemfs.mrc.brain.storage.SliceID; + +/** + * + * 29.09.2008 + * + * @author clorenz + */ +public final class RequestDetails { + public SliceID sliceId; + + public String userId; + + public boolean superUser; + + public List groupIds; + + public boolean authenticated; + + public boolean authorized; + + public boolean persistentOperation; + + public Map context; + + /** + * + */ + public RequestDetails() { + sliceId = null; + userId = null; + authenticated = false; + authorized = false; + } +} diff --git a/servers/src/org/xtreemfs/mrc/ac/FileAccessManager.java b/servers/src/org/xtreemfs/mrc/ac/FileAccessManager.java new file mode 100644 index 0000000000000000000000000000000000000000..d8a8091391b50d980b951cd5d9bd4dff3175efbe --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/ac/FileAccessManager.java @@ -0,0 +1,183 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.ac; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.mrc.PolicyContainer; +import org.xtreemfs.mrc.brain.BrainException; +import org.xtreemfs.mrc.brain.UserException; +import org.xtreemfs.mrc.slices.SliceManager; + +/** + * This class is responsible for checking policy-based file access. + * + * @author stender + * + */ +public class FileAccessManager { + + public static final int READ_ACCESS = 1; + + public static final int SEARCH_ACCESS = 2; + + public static final int WRITE_ACCESS = 3; + + public static final int DELETE_ACCESS = 4; + + public static final int RM_MV_IN_DIR_ACCESS = 5; + + private final SliceManager sliceMan; + + private final Map policies; + + private PolicyContainer policyContainer; + + public FileAccessManager(SliceManager sliceMan, PolicyContainer policyContainer) { + + this.sliceMan = sliceMan; + this.policyContainer = policyContainer; + + policies = new HashMap(); + policies.put(POSIXFileAccessPolicy.POLICY_ID, new POSIXFileAccessPolicy(sliceMan)); + policies.put(YesToAnyoneFileAccessPolicy.POLICY_ID, new YesToAnyoneFileAccessPolicy()); + policies.put(VolumeACLFileAccessPolicy.POLICY_ID, new VolumeACLFileAccessPolicy(sliceMan)); + } + + public void checkSearchPermission(String volumeId, String path, String userId, + boolean superUser, List groupIds) throws UserException, BrainException { + + if (superUser) + return; + + getVolumeFileAccessPolicy(volumeId).checkSearchPermission(volumeId, path, userId, groupIds); + } + + public void checkPrivilegedPermissions(String volumeId, long fileId, String userId, + boolean superUser, List groupIds) throws UserException, BrainException { + + if (superUser) + return; + + getVolumeFileAccessPolicy(volumeId).checkPrivilegedPermissions(volumeId, fileId, userId, + groupIds); + } + + public void checkPermission(int accessMode, String volumeId, 
long fileId, long parentId, + String userId, boolean superUser, List groupIds) throws UserException, + BrainException { + + checkPermission(translateAccessMode(volumeId, accessMode), volumeId, fileId, parentId, + userId, superUser, groupIds); + } + + public void checkPermission(String accessMode, String volumeId, long fileId, long parentId, + String userId, boolean superUser, List groupIds) throws UserException, + BrainException { + + if (superUser) + return; + + getVolumeFileAccessPolicy(volumeId).checkPermission(volumeId, fileId, parentId, userId, + groupIds, accessMode); + } + + public String translateAccessMode(String volumeId, int accessMode) throws BrainException { + return getVolumeFileAccessPolicy(volumeId).translateAccessMode(accessMode); + } + + public Map createDefaultVolumeACL(String volumeId) throws BrainException { + return getVolumeFileAccessPolicy(volumeId).createDefaultVolumeACL(volumeId); + } + + public Map convertToACL(String volumeId, long mode) throws BrainException { + return getVolumeFileAccessPolicy(volumeId).convertToACL(mode); + } + + public long getPosixAccessMode(String volumeId, long fileId, String userId, + List groupIds) throws BrainException { + return getVolumeFileAccessPolicy(volumeId).getPosixAccessRights(volumeId, fileId, userId, + groupIds); + } + + public void setPosixAccessMode(String volumeId, long fileId, String userId, + List groupIds, long posixRights) throws BrainException, UserException { + getVolumeFileAccessPolicy(volumeId).setPosixAccessRights(volumeId, fileId, userId, + groupIds, posixRights); + } + + public void setACLEntries(String volumeId, long fileId, String userId, List groupIds, + Map entries) throws BrainException, UserException { + getVolumeFileAccessPolicy(volumeId).setACLEntries(volumeId, fileId, userId, groupIds, + entries); + } + + public void removeACLEntries(String volumeId, long fileId, String userId, + List groupIds, List entities) throws BrainException, UserException { + 
getVolumeFileAccessPolicy(volumeId).removeACLEntries(volumeId, fileId, userId, groupIds, + entities); + } + + public FileAccessPolicy getFileAccessPolicy(long policyId) { + + FileAccessPolicy policy = policies.get(policyId); + + // if the policy is not built-in, try to load it from the plug-in + // directory + if (policy == null) { + try { + policy = policyContainer.getFileAccessPolicy(policyId, sliceMan); + policies.put(policyId, policy); + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_WARN, this, + "could not load FileAccessPolicy with ID " + policyId); + Logging.logMessage(Logging.LEVEL_WARN, this, exc); + } + } + + return policy; + } + + protected FileAccessPolicy getVolumeFileAccessPolicy(String volumeId) throws BrainException { + + try { + long policyId = sliceMan.getVolumeById(volumeId).getAcPolicyId(); + + FileAccessPolicy policy = getFileAccessPolicy(policyId); + + if (policy == null) + throw new BrainException("unknown file access policy for volume " + volumeId + ": " + + policyId); + + return policy; + + } catch (UserException exc) { + throw new BrainException(exc); + } + } +} diff --git a/servers/src/org/xtreemfs/mrc/ac/FileAccessPolicy.java b/servers/src/org/xtreemfs/mrc/ac/FileAccessPolicy.java new file mode 100644 index 0000000000000000000000000000000000000000..98468cbc1b34428b8903971143c869d73b9167ab --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/ac/FileAccessPolicy.java @@ -0,0 +1,238 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.ac; + +import java.util.List; +import java.util.Map; + +import org.xtreemfs.mrc.brain.BrainException; +import org.xtreemfs.mrc.brain.UserException; + +/** + * An interface for a policy defining file access. + * + * @author stender + */ +public interface FileAccessPolicy { + + /** + * Returns a string representing the policy-specific translation of the + * given POSIX access mode. This method can be used to obtain a valid access + * mode string to pass with checkAccess. + * + * @param accessMode + * the POSIX access mode, see {@link FileAccessManager} constants + * @return a policy-specific string describing the access mode + */ + public String translateAccessMode(int accessMode); + + /** + * Checks whether the user with the given ID is allowed to perform + * operations for the given file with the given access mode. + * + * @param volumeId + * the volume ID of the file + * @param fileId + * the file ID + * @param parentId + * the ID of the file's parent - note that '0' is provided unless + * the check refers to an entity being added, deleted or moved in + * a directory + * @param userId + * the user ID + * @param groupIds + * a list of group IDs + * @param accessMode + * the access mode. How the access mode has to be interpreted + * depends on the policy implementation. 
+ * @throws UserException + * if the permission was denied + * @throws BrainException + * if an error occurs while trying to get permissions + */ + public void checkPermission(String volumeId, long fileId, long parentId, String userId, + List groupIds, String accessMode) throws UserException, BrainException; + + /** + * Checks whether search permission is granted on the given path. The method + * should return without throwing an exception if + * checkPermission() for an access mode implying the right to + * switch to the directory returns true. + * + * POSIX-compliant implementations might have to check permissions + * recursively for each path component. Since there might not be an explicit + * access mode for searching directories, the framework will invoke this + * method instead of using checkPermission() when checking + * search access on directories. + * + * @param volumeId + * the volume ID of the directory + * @param path + * the full path to the file or directory + * @param userId + * the user ID + * @param groupIds + * a list of group IDs + * @throws UserException + * if the permission was denied + * @throws BrainException + * if an error occurs at the backend + */ + public void checkSearchPermission(String volumeId, String path, String userId, + List groupIds) throws UserException, BrainException; + + /** + * Checks whether permission is granted to change the owner of the file with + * the given ID. 
+ * + * @param volumeId + * the volume ID of the file + * @param fileId + * the file ID in the volume + * @param userId + * the user ID + * @param groupIds + * a list of group IDs + * @throws UserException + * if the permissoin was denied + * @throws BrainException + * if an error occurs at the backend + */ + public void checkPrivilegedPermissions(String volumeId, long fileId, String userId, + List groupIds) throws UserException, BrainException; + + /** + * Returns an ACL in the form of a map that is used as the volume ACL in + * case no volume ACL is explicitly specified when a new volume is created. + * + * @param volumeId + * the volume ID + * @return a mapping from String to Long representing the default volume ACL + * @throws BrainException + * if an error occurs while determining the default volume ACLs + */ + public Map createDefaultVolumeACL(String volumeId) throws BrainException; + + /** + * Returns the access control list that is automatically assigned to a newly + * created child. + * + * The framework will invoke this method when a new file or directory is + * created. + * + * @param mode + * the access mode from which the initial ACL is calculated + * @throws BrainException + * if an error occurs at the backend + */ + public Map convertToACL(long mode) throws BrainException; + + /** + * Returns a POSIX access mode bit mask for the given file and user in the + * form of a long. As specified in POSIX, the first three bits represent + * read, write and execute access for the user, the next three bits do the + * same for the group, and the last three bits for the rest of the world. + * Any other bits may be used in a policy-specific manner. 
+ * + * @param volumeId + * the volume ID of the file + * @param fileId + * the file ID in the volume + * @param userId + * the user ID + * @param groupIds + * a list of group IDs + * @return the POSIX access rights + * @throws BrainException + * if an error occurs when trying to tranlate access rights + */ + public long getPosixAccessRights(String volumeId, long fileId, String userId, + List groupIds) throws BrainException; + + /** + * Modifies the file ACL by means of a POSIX access mode bit mask. + * + * @param volumeId + * the volume ID + * @param fileId + * the file ID + * @param userId + * the user ID + * @param groupIds + * a list of group IDs + * @param posixRights + * a long value describing the POSIX access rights + * @throws BrainException + * if an error occurs when trying to tranlate access rights + * @throws UserException + * if access is denied + */ + public void setPosixAccessRights(String volumeId, long fileId, String userId, + List groupIds, long posixRights) throws BrainException, UserException; + + /** + * Creates or changes a set of entries the current ACL of a file. + * + * @param volumeId + * the volume ID + * @param fileId + * the file ID + * @param userId + * the user ID + * @param groupIds + * a list of group IDs + * @param entries + * a mapping from entity names (ac entities) to long values + * (rights masks) representing the ACL entries to add/modify + * @throws BrainException + * if an error occurs when trying to change the ACL + * @throws UserException + * if access is denied + */ + public void setACLEntries(String volumeId, long fileId, String userId, List groupIds, + Map entries) throws BrainException, UserException; + + /** + * Creates or changes an entry in the current ACL of a file. 
+ * + * @param volumeId + * the volume ID + * @param fileId + * the file ID + * @param userId + * the user ID + * @param groupIds + * a list of group IDs + * @param entities + * a list of access control entity names to delete from the ACL + * @throws BrainException + * if an error occurs when trying to change the ACL + * @throws UserException + * if access is denied + */ + public void removeACLEntries(String volumeId, long fileId, String userId, + List groupIds, List entities) throws BrainException, UserException; + +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/mrc/ac/POSIXFileAccessPolicy.java b/servers/src/org/xtreemfs/mrc/ac/POSIXFileAccessPolicy.java new file mode 100644 index 0000000000000000000000000000000000000000..01e230aae009962c345b094baaa9a82d765d8db5 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/ac/POSIXFileAccessPolicy.java @@ -0,0 +1,619 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.ac; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.mrc.brain.BrainException; +import org.xtreemfs.mrc.brain.ErrNo; +import org.xtreemfs.mrc.brain.UserException; +import org.xtreemfs.mrc.brain.storage.StorageManager; +import org.xtreemfs.mrc.brain.storage.entities.ACLEntry; +import org.xtreemfs.mrc.brain.storage.entities.AbstractFileEntity; +import org.xtreemfs.mrc.brain.storage.entities.DirEntity; +import org.xtreemfs.mrc.slices.SliceManager; +import org.xtreemfs.mrc.utils.Converter; + +/** + * This policy evaluates access rights according to POSIX access control lists + * (ACLs). The implementation is based on the description provided in "POSIX + * Access Control Lists on Linux" by Andreas Grünbacher + * (http://www.suse.de/~agruen/acl/linux-acls/online/). + * + *

+ * Evaluation of access rights assumes that at least a minimal ACL exists on the + * corresponding file system entity. Minimal ACLs contain one entry for the + * owner, one for the owning group and one for the other users. A more + * fine-grained access control model can be supported by using extended ACLs. In + * order to be interpreted correctly, an ACL entry needs to correspond to at + * least one of the following patterns: + * + *

    + *
  • "user::" - owner, has to occur exactly once + *
  • "user:<name>" - named user, may occur zero or more times + *
  • "group::" - owning group, has to occur exactly once + *
  • "group:<name>" - named group, may occur zero or more times + *
  • "other::" - other, has to occur exactly once + *
  • "mask::" - mask, may occur at most once + *
+ * + *

+ * checkPermission() supports the following access modes: + * + *

    + *
  • "r" - read + *
  • "w" - write + *
  • "x" - execute + *
  • "a" - append + *
  • "ga" - GFS-like append (concurrent appends are properly synchronized) + *
  • "c" - create + *
  • "t" - truncate + *
  • "sr" - strict read-only + *
  • "d" - delete + *
+ * + *

+ * When checking access to a file or directory, the policy will search for the + * relevant ACL entry according to the POSIX access check algorithm, where the + * 'rights' value of the ACL entry is interpreted as a bit mask. The bits for + * the corresponding access modes are set in the reverse order as they are + * enumerated above. + * + *

+ * Example: an ACL entry ("user::", 35) would grant read, write and create + * access to the file owner, because 35 represents the bit mask 000100011. + * + *

+ * The conversion between ACLs and the POSIX access mode only takes into account + * the read, write and execute bits of the owner, owning group and others ACL + * entry. Search access on a file or directory is determined by means of + * checking execute access on each component of the parent tree. + * + * @author stender + */ +public class POSIXFileAccessPolicy implements FileAccessPolicy { + + public static final long POLICY_ID = 2; + + private static final String OWNER = "user::"; + + private static final String OWNER_GROUP = "group::"; + + private static final String OTHER = "other::"; + + private static final String MASK = "mask::"; + + private static final String NAMED_USER_PREFIX = "user:"; + + private static final String NAMED_GROUP_PREFIX = "group:"; + + private static final String STICKY_BIT = "sticky"; + + private static final String AM_WRITE = "w"; + + private static final String AM_READ = "r"; + + private static final String AM_EXECUTE = "x"; + + private static final String AM_DELETE = "d"; + + private static final String AM_MV_RM_IN_DIR = "m"; + + private static final long PERM_READ = 1 << 0; + + private static final long PERM_WRITE = 1 << 1; + + private static final long PERM_EXECUTE = 1 << 2; + + private static final long PERM_APPEND = 1 << 3; + + private static final long PERM_GFS_APPEND = 1 << 4; + + private static final long PERM_CREATE = 1 << 5; + + private static final long PERM_TRUNCATE = 1 << 6; + + private static final long PERM_STRICT_READ = 1 << 7; + + private static final long PERM_DELETE = 1 << 8; + + private static final long PERM_SUID_SGID = 1 << 16; + + private static final long READ_MASK = PERM_READ | PERM_STRICT_READ; + + private static final long WRITE_MASK = PERM_WRITE | PERM_APPEND | PERM_GFS_APPEND + | PERM_CREATE | PERM_TRUNCATE | PERM_DELETE; + + private static final long EXEC_MASK = PERM_EXECUTE; + + private SliceManager sliceMan; + + public POSIXFileAccessPolicy(SliceManager sliceMan) { + this.sliceMan = sliceMan; + } + + 
public String translateAccessMode(int accessMode) { + switch (accessMode) { + case FileAccessManager.READ_ACCESS: + return AM_READ; + case FileAccessManager.WRITE_ACCESS: + return AM_WRITE; + case FileAccessManager.SEARCH_ACCESS: + return AM_EXECUTE; + case FileAccessManager.DELETE_ACCESS: + return AM_DELETE; + case FileAccessManager.RM_MV_IN_DIR_ACCESS: + return AM_MV_RM_IN_DIR; + } + + return null; + } + + public void checkPermission(String volumeId, long fileId, long parentId, String userId, + List groupIds, String accessMode) throws UserException, BrainException { + + try { + + StorageManager sMan = sliceMan.getSliceDB(volumeId, fileId, 'r'); + + // retrieve the parent entity from the database + AbstractFileEntity parent = sMan.getFileEntity(parentId); + + // retrieve the file entity from the database + AbstractFileEntity file = sMan.getFileEntity(fileId); + + // retrieve the relevant ACL entry for evaluating the access + // rights + ACLEntry entry = getRelevantACLEntry(volumeId, file, parent, userId, groupIds, + accessMode); + assert (entry != null); + + // if the ACL entry is 'owner' or 'others', evaluate the access + // rights without taking into account the 'mask' entry + if (OTHER.equals(entry.getEntity()) || OWNER.equals(entry.getEntity())) { + + if (checkRights(accessMode, entry.getRights(), file, parent, userId)) { + return; + } else + accessDenied(volumeId, fileId, accessMode); + + } + + // otherwise, check whether both the entry and the mask entry + // grant access + ACLEntry maskEntry = findACL(file.getAcl(), MASK); + if (checkRights(accessMode, entry.getRights(), file, parent, userId) + && (maskEntry == null || checkRights(accessMode, maskEntry.getRights(), file, + parent, userId))) + return; + else + accessDenied(volumeId, fileId, accessMode); + + } catch (UserException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + + } + + public void checkSearchPermission(String volumeId, String path, String userId, + List 
groupIds) throws UserException, BrainException { + + try { + + // check search permission for the root directory + StorageManager sMan = sliceMan.getSliceDB(volumeId, 1, 'r'); + AbstractFileEntity parentDir = sMan.getFileEntity(1); + checkPermission(volumeId, 1, 0, userId, groupIds, "x"); + + // iteratively check search permissions for all directories in the + // path + for (int index = 0; index < path.length();) { + + int newIndex = path.indexOf('/', index + 1); + if (newIndex == -1) + newIndex = path.length(); + + String nextComponent = path.substring(index, newIndex); + + sMan = sliceMan.getSliceDB(volumeId, path.substring(0, newIndex - 1), 'r'); + + parentDir = sMan.getChild(nextComponent, parentDir.getId()); + checkPermission(volumeId, parentDir.getId(), 0, userId, groupIds, "x"); + + index = newIndex + 1; + } + + } catch (UserException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void checkPrivilegedPermissions(String volumeId, long fileId, String userId, + List groupIds) throws UserException, BrainException { + + try { + + StorageManager sMan = sliceMan.getSliceDB(volumeId, fileId, 'r'); + + if (!sMan.getFileEntity(fileId).getUserId().equals(userId)) + throw new UserException(ErrNo.EPERM, "no privileged permissions granted"); + + } catch (UserException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void setACLEntries(String volumeId, long fileId, String userId, List groupIDs, + Map entries) throws BrainException, UserException { + + try { + + StorageManager sMan = sliceMan.getSliceDB(volumeId, fileId, 'w'); + + // check whether the current user is the owner of the file + Map acl = Converter.aclToMap(sMan.getFileEntity(fileId).getAcl()); + + if (acl == null) + acl = entries; + else + for (String entity : entries.keySet()) + acl.put(entity, entries.get(entity)); + + sMan.setFileACL(fileId, acl); + + } catch (UserException exc) { + throw exc; + } catch 
(Exception exc) { + throw new BrainException(exc); + } + + } + + public void removeACLEntries(String volumeId, long fileId, String userId, + List groupIds, List entities) throws BrainException, UserException { + + try { + + StorageManager sMan = sliceMan.getSliceDB(volumeId, fileId, 'w'); + + if (!sMan.getFileEntity(fileId).getUserId().equals(userId)) + throw new UserException(ErrNo.EPERM, + "changing access mode is restricted to file owner"); + + // check whether the current user is the owner of the file + Map acl = Converter.aclToMap(sMan.getFileEntity(fileId).getAcl()); + assert (acl != null); + + for (Object entity : entities) + acl.remove(entity); + + sMan.setFileACL(fileId, acl); + + } catch (UserException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + + } + + public Map convertToACL(long mode) throws BrainException { + + try { + + Map aclMap = new HashMap(); + + // determine the sticky bit + long stickyBit = (mode & (1 << 9)) > 0 ? 1 : 0; + if (stickyBit != 0) + aclMap.put(STICKY_BIT, stickyBit); + + // determine ACL for owner + long owr = (mode & (1 << 6)) > 0 ? EXEC_MASK : 0; + owr |= (mode & (1 << 7)) > 0 ? WRITE_MASK : 0; + owr |= (mode & (1 << 8)) > 0 ? READ_MASK : 0; + owr |= (mode & (1 << 11)) > 0 ? PERM_SUID_SGID : 0; + aclMap.put(OWNER, owr); + + // determine ACL for group + long grr = (mode & (1 << 3)) > 0 ? EXEC_MASK : 0; + grr |= (mode & (1 << 4)) > 0 ? WRITE_MASK : 0; + grr |= (mode & (1 << 5)) > 0 ? READ_MASK : 0; + grr |= (mode & (1 << 10)) > 0 ? PERM_SUID_SGID : 0; + aclMap.put(OWNER_GROUP, grr); + + // determine ACL for others + long otr = (mode & (1 << 0)) > 0 ? EXEC_MASK : 0; + otr |= (mode & (1 << 1)) > 0 ? WRITE_MASK : 0; + otr |= (mode & (1 << 2)) > 0 ? 
READ_MASK : 0; + aclMap.put(OTHER, otr); + + return aclMap; + + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public Map createDefaultVolumeACL(String volumeId) throws BrainException { + + Map aclMap = new HashMap(); + aclMap.put(OWNER, 511L); // the owner may do anything + aclMap.put(OWNER_GROUP, 0L); + aclMap.put(OTHER, 0L); + + return aclMap; + } + + public void setPosixAccessRights(String volumeId, long fileId, String userId, + List groupIds, long posixAccessRights) throws BrainException, UserException { + + try { + + StorageManager sMan = sliceMan.getSliceDB(volumeId, fileId, 'w'); + + // check whether the current user is the owner of the file + ACLEntry[] acl = sMan.getFileEntity(fileId).getAcl(); + assert (acl != null); + + ACLEntry owner = findACL(acl, OWNER); + ACLEntry group = findACL(acl, MASK); + if (group == null) + group = findACL(acl, OWNER_GROUP); + ACLEntry other = findACL(acl, OTHER); + assert (owner != null); + assert (group != null); + assert (other != null); + + ACLEntry sticky = findACL(acl, STICKY_BIT); + + // determine rights mask for owner + long owr = (posixAccessRights & (1 << 6)) > 0 ? EXEC_MASK : 0; + owr |= (posixAccessRights & (1 << 7)) > 0 ? WRITE_MASK : 0; + owr |= (posixAccessRights & (1 << 8)) > 0 ? READ_MASK : 0; + owr |= (posixAccessRights & (1 << 11)) > 0 ? PERM_SUID_SGID : 0; + + // determine rights mask for group + long grr = (posixAccessRights & (1 << 3)) > 0 ? EXEC_MASK : 0; + grr |= (posixAccessRights & (1 << 4)) > 0 ? WRITE_MASK : 0; + grr |= (posixAccessRights & (1 << 5)) > 0 ? READ_MASK : 0; + grr |= (posixAccessRights & (1 << 10)) > 0 ? PERM_SUID_SGID : 0; + + // determine rights mask for others + long otr = (posixAccessRights & (1 << 0)) > 0 ? EXEC_MASK : 0; + otr |= (posixAccessRights & (1 << 1)) > 0 ? WRITE_MASK : 0; + otr |= (posixAccessRights & (1 << 2)) > 0 ? 
READ_MASK : 0; + + // determine whether the sticky bit is set + boolean isSticky = (posixAccessRights & (1 << 9)) > 0; + + if (sticky == null && isSticky) { + sticky = new ACLEntry(STICKY_BIT, 1); + ACLEntry[] newAcl = new ACLEntry[acl.length + 1]; + for (int i = 0, j = 0; i < acl.length; i++, j++) { + if (STICKY_BIT.compareTo(acl[i].getEntity()) < 0) { + newAcl[j++] = sticky; + } + newAcl[j] = acl[i]; + } + acl = newAcl; + } + + else if (sticky != null) + sticky.setRights(isSticky ? 1 : 0); + + owner.setRights(owr); + group.setRights(grr); + other.setRights(otr); + + sMan.setFileACL(fileId, acl); + + } catch (UserException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + + } + + public long getPosixAccessRights(String volumeId, long fileId, String userId, + List groupIds) throws BrainException { + + try { + + StorageManager sMan = sliceMan.getSliceDB(volumeId, fileId, 'r'); + + ACLEntry[] acl = sMan.getFileEntity(fileId).getAcl(); + if (acl == null) + return 0; + + ACLEntry owner = findACL(acl, OWNER); + ACLEntry group = findACL(acl, MASK); + if (group == null) + group = findACL(acl, OWNER_GROUP); + ACLEntry other = findACL(acl, OTHER); + assert (owner != null); + assert (group != null); + assert (other != null); + + ACLEntry sticky = findACL(acl, STICKY_BIT); + + return ((owner.getRights() & PERM_SUID_SGID) > 0 ? 1 << 11 : 0) + | ((group.getRights() & PERM_SUID_SGID) > 0 ? 1 << 10 : 0) + | ((sticky != null && sticky.getRights() == 1) ? 1 << 9 : 0) + | ((owner.getRights() & PERM_READ) > 0 ? 1 << 8 : 0) + | ((owner.getRights() & PERM_WRITE) > 0 ? 1 << 7 : 0) + | ((owner.getRights() & PERM_EXECUTE) > 0 ? 1 << 6 : 0) + | ((group.getRights() & PERM_READ) > 0 ? 1 << 5 : 0) + | ((group.getRights() & PERM_WRITE) > 0 ? 1 << 4 : 0) + | ((group.getRights() & PERM_EXECUTE) > 0 ? 1 << 3 : 0) + | ((other.getRights() & PERM_READ) > 0 ? 1 << 2 : 0) + | ((other.getRights() & PERM_WRITE) > 0 ? 
1 << 1 : 0) + | ((other.getRights() & PERM_EXECUTE) > 0 ? 1 : 0); + + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + private static ACLEntry findACL(ACLEntry[] acl, String entityName) { + + // find the ACL entry by means of a binary search + int low = 0; + int high = acl.length - 1; + + while (low <= high) { + int mid = (low + high) >>> 1; + ACLEntry midEntry = acl[mid]; + + int cmp = midEntry.getEntity().compareTo(entityName); + if (cmp < 0) + low = mid + 1; + else if (cmp > 0) + high = mid - 1; + else + return acl[mid]; + } + + return null; + } + + private static boolean checkRights(String accessMode, long rights, AbstractFileEntity file, + AbstractFileEntity parent, String userId) { + + if (accessMode.length() == 1) { + switch (accessMode.charAt(0)) { + case 'r': + return (rights & (1 << 0)) != 0; + case 'w': + return (rights & (1 << 1)) != 0; + case 'x': + return (rights & (1 << 2)) != 0; + case 'a': + return (rights & (1 << 3)) != 0; + case 'c': + return (rights & (1 << 5)) != 0; + case 't': + return (rights & (1 << 6)) != 0; + case 'd': + return (rights & (1 << 8)) != 0; + case 'm': + + // evaluate the parent's sticky bit + ACLEntry stickyBitEntry = findACL(parent.getAcl(), STICKY_BIT); + if (stickyBitEntry != null && stickyBitEntry.getRights() != 0) + return file.getUserId().equals(userId); + else + return true; + } + + } else if (accessMode.length() == 2) { + if (accessMode.equals("ga") && (rights & (1 << 4)) != 0) + return true; + else if (accessMode.equals("sr") && (rights & (1 << 7)) != 0) + return true; + } + + return false; + } + + private static ACLEntry getRelevantACLEntry(String volumeId, AbstractFileEntity file, + AbstractFileEntity parent, String userId, List groupIds, String accessMode) + throws UserException { + + // if the user ID is the owner, check access according to the rights + // associated with the owner entry + if (file.getUserId().equals(userId)) { + + ACLEntry entry = findACL(file.getAcl(), OWNER); + assert 
(entry != null); + + return entry; + } + + // if the user ID refers to a named user, check access according to + // the corresponding user rights + ACLEntry entry = findACL(file.getAcl(), NAMED_USER_PREFIX + userId); + if (entry != null) + return entry; + + boolean groupFound = false; + + // if a group ID refers to the owning group, check whether access is + // granted according to the owning group rights + for (String groupId : groupIds) { + if (groupId.equals(file.getGroupId())) { + + entry = findACL(file.getAcl(), OWNER_GROUP); + assert (entry != null); + + if (checkRights(accessMode, entry.getRights(), file, parent, userId)) + return entry; + + groupFound = true; + } + } + + // if a group ID refers to any of the named groups, check whether + // access is granted according to the corresponding group rights + for (String groupId : groupIds) { + + entry = findACL(file.getAcl(), NAMED_GROUP_PREFIX + groupId); + + if (entry != null) { + + if (checkRights(accessMode, entry.getRights(), file, parent, userId)) + return entry; + + groupFound = true; + } + } + + // if there was a matching entry but access was not granted, access + // is denied + if (groupFound) + accessDenied(volumeId, file.getId(), accessMode); + + entry = findACL(file.getAcl(), OTHER); + assert (entry != null); + return entry; + } + + private static void accessDenied(String volumeId, long fileId, String accessMode) + throws UserException { + + throw new UserException(ErrNo.EACCES, "access denied, volumeId = " + volumeId + + ", fileId = " + fileId + ", accessMode = \"" + accessMode + "\""); + } + +} diff --git a/servers/src/org/xtreemfs/mrc/ac/VolumeACLFileAccessPolicy.java b/servers/src/org/xtreemfs/mrc/ac/VolumeACLFileAccessPolicy.java new file mode 100644 index 0000000000000000000000000000000000000000..31c7486c80fa1820927e54bfe2554bddccfe3e66 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/ac/VolumeACLFileAccessPolicy.java @@ -0,0 +1,237 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer 
Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.ac; + +import java.util.List; +import java.util.Map; + +import org.xtreemfs.mrc.brain.BrainException; +import org.xtreemfs.mrc.brain.ErrNo; +import org.xtreemfs.mrc.brain.Path; +import org.xtreemfs.mrc.brain.UserException; +import org.xtreemfs.mrc.brain.storage.StorageManager; +import org.xtreemfs.mrc.brain.storage.entities.ACLEntry; +import org.xtreemfs.mrc.slices.SliceManager; + +/** + * This policy grants or denies access based on immutable volume ACLs. Note that + * ACLs are not POSIX ACLs. A 'default' entry may be defined that is valid for + * any user except for those having a user-specific entry. 
+ * + * @author stender + * + */ +public class VolumeACLFileAccessPolicy implements FileAccessPolicy { + + private SliceManager sliceMan; + + public static final long POLICY_ID = 3; + + private static final String AM_WRITE = "w"; + + private static final String AM_READ = "r"; + + private static final String AM_DELETE = "d"; + + public VolumeACLFileAccessPolicy(SliceManager sliceMan) { + this.sliceMan = sliceMan; + } + + public String translateAccessMode(int accessMode) { + switch (accessMode) { + case FileAccessManager.READ_ACCESS: + return AM_READ; + case FileAccessManager.WRITE_ACCESS: + return AM_WRITE; + case FileAccessManager.SEARCH_ACCESS: + return AM_READ; + case FileAccessManager.DELETE_ACCESS: + return AM_DELETE; + } + + return null; + } + + public void checkPermission(String volumeId, long fileId, long parentId, String userId, + List groupIds, String accessMode) throws UserException, BrainException { + + try { + + if (fileId == 0) + return; + + StorageManager sMan = sliceMan.getSliceDB(volumeId, "/", 'r'); + + ACLEntry[] acl = sMan.getVolumeACL(); + + long rights = getRights(userId, acl); + + if (accessMode.length() == 1) { + switch (accessMode.charAt(0)) { + case 'r': + if ((rights & (1 << 0)) != 0) + return; + break; + case 'w': + if ((rights & (1 << 1)) != 0) + return; + break; + case 'a': + if ((rights & (1 << 2)) != 0) + return; + break; + case 'c': + if ((rights & (1 << 4)) != 0) + return; + break; + case 't': + if ((rights & (1 << 5)) != 0) + return; + break; + case 'd': + if ((rights & (1 << 7)) != 0) + return; + break; + } + } else if (accessMode.length() == 2) { + if (accessMode.equals("ga") && (rights & (1 << 3)) != 0) + return; + if (accessMode.equals("sr")) + if ((rights & (1 << 6)) != 0) + return; + } + + } catch (Exception exc) { + throw new BrainException(exc); + } + + throw new UserException(ErrNo.EACCES, "access denied, volumeId = " + volumeId + + ", fileId = " + fileId + ", accessMode = \"" + accessMode + "\""); + } + + public void 
checkSearchPermission(String volumeId, String path, String userId, + List groupIds) throws UserException, BrainException { + checkPermission(volumeId, 1, 0, userId, groupIds, AM_READ); + } + + public void checkPrivilegedPermissions(String volumeId, long fileId, String userId, + List groupIds) throws UserException, BrainException { + + try { + + StorageManager sMan = sliceMan.getSliceDB(volumeId, "/", 'r'); + + if (!sMan.getFileEntity(1).getUserId().equals(userId)) + throw new UserException(ErrNo.EPERM, + "changing file owner is restricted to file owner"); + + } catch (UserException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public Map createDefaultVolumeACL(String volumeId) throws BrainException { + return null; + } + + public Map convertToACL(long mode) throws BrainException { + return null; + } + + public long getPosixAccessRights(String volumeId, long fileId, String userId, + List groupIds) throws BrainException { + + try { + StorageManager sMan = sliceMan.getSliceDB(volumeId, "/", 'r'); + ACLEntry[] acl = sMan.getVolumeACL(); + + long rights = getRights(userId, acl); + rights = rights & 3 | ((rights & 1) << 2); // rw-mask, x=r + return rights * (1 << 6); + + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void setPosixAccessRights(String volumeId, long fileId, String userId, + List groupIds, long posixRights) throws BrainException { + // do nothing + } + + public void setACLEntries(String volumeId, long fileId, String userId, List groupIDs, + Map entries) throws BrainException, UserException { + + try { + + // set volume ACL initially + StorageManager sMan = sliceMan.getSliceDB(volumeId, "/", 'w'); + ACLEntry[] acl = sMan.getVolumeACL(); + if (acl == null) + sMan.setFileACL(1, entries); + + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void removeACLEntries(String volumeId, long fileId, String userId, + List groupIds, List entities) throws 
BrainException, UserException { + // do nothing + } + + private static long getRights(String userId, ACLEntry[] acl) { + + // do not permit anything by default + if (acl == null) + return 0; + + // find the ACL entry by means of a binary search + int low = 0; + int high = acl.length - 1; + + while (low <= high) { + + int mid = (low + high) >>> 1; + ACLEntry midEntry = acl[mid]; + + int cmp = midEntry.getEntity().compareTo(userId); + if (cmp < 0) + low = mid + 1; + else if (cmp > 0) + high = mid - 1; + else + return acl[mid].getRights(); + } + + if (userId.equals("default")) + return 0; + else + return getRights("default", acl); + } + +} diff --git a/servers/src/org/xtreemfs/mrc/ac/YesToAnyoneFileAccessPolicy.java b/servers/src/org/xtreemfs/mrc/ac/YesToAnyoneFileAccessPolicy.java new file mode 100644 index 0000000000000000000000000000000000000000..f4231e3f2afdd2454715db7d60a46b5300348694 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/ac/YesToAnyoneFileAccessPolicy.java @@ -0,0 +1,108 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.ac; + +import java.util.List; +import java.util.Map; + +import org.xtreemfs.mrc.brain.BrainException; +import org.xtreemfs.mrc.brain.UserException; + +/** + * This policy will grant access to anyone. It does not allow changing access + * rights, any ACLs set on files or volumes will be ignored. + * + * @author stender + * + */ +public class YesToAnyoneFileAccessPolicy implements FileAccessPolicy { + + public static final long POLICY_ID = 1; + + private static final String AM_WRITE = "w"; + + private static final String AM_READ = "r"; + + private static final String AM_DELETE = "d"; + + public String translateAccessMode(int accessMode) { + switch (accessMode) { + case FileAccessManager.READ_ACCESS: + return AM_READ; + case FileAccessManager.WRITE_ACCESS: + return AM_WRITE; + case FileAccessManager.SEARCH_ACCESS: + return AM_READ; + case FileAccessManager.DELETE_ACCESS: + return AM_DELETE; + } + + return null; + } + + public void checkPermission(String volumeId, long fileId, long parentId, String userId, + List groupIds, String accessMode) { + // do nothing + } + + public void checkSearchPermission(String volumeId, String path, String userId, + List groupIds) { + // do nothing + } + + public void checkPrivilegedPermissions(String volumeId, long fileId, String userId, + List groupIds) { + // do nothing + } + + public Map createDefaultVolumeACL(String volumeId) throws BrainException { + return null; + } + + public Map convertToACL(long mode) { + return null; + } + + public long getPosixAccessRights(String volumeId, long fileId, String userId, + List groupIds) { + return 511; // rwxrwxrwx + } + + public void setPosixAccessRights(String volumeId, long fileId, String userId, + List groupIds, long posixRights) { + // do nothing + } + + public void setACLEntries(String volumeId, long fileId, String userId, List groupIDs, + Map entries) throws BrainException, UserException { + // do nothing + } + + 
public void removeACLEntries(String volumeId, long fileId, String userId, + List groupIds, List entities) throws BrainException, UserException { + // do nothing + } + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/Brain.java b/servers/src/org/xtreemfs/mrc/brain/Brain.java new file mode 100644 index 0000000000000000000000000000000000000000..02526b4a39fd75165be71bb8789f8f1baaab9343 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/Brain.java @@ -0,0 +1,3015 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.brain; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.StringTokenizer; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.VersionManagement; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.mrc.MRCConfig; +import org.xtreemfs.mrc.MRCRequest; +import org.xtreemfs.mrc.PolicyContainer; +import org.xtreemfs.mrc.ac.FileAccessManager; +import org.xtreemfs.mrc.ac.YesToAnyoneFileAccessPolicy; +import org.xtreemfs.mrc.brain.storage.BackendException; +import org.xtreemfs.mrc.brain.storage.SliceID; +import org.xtreemfs.mrc.brain.storage.StorageManager; +import org.xtreemfs.mrc.brain.storage.entities.ACLEntry; +import org.xtreemfs.mrc.brain.storage.entities.AbstractFileEntity; +import org.xtreemfs.mrc.brain.storage.entities.DirEntity; +import org.xtreemfs.mrc.brain.storage.entities.FileEntity; +import org.xtreemfs.mrc.brain.storage.entities.StripingPolicy; +import org.xtreemfs.mrc.brain.storage.entities.XLocation; +import org.xtreemfs.mrc.brain.storage.entities.XLocationsList; +import org.xtreemfs.mrc.osdselection.OSDStatusManager; +import org.xtreemfs.mrc.osdselection.RandomSelectionPolicy; +import org.xtreemfs.mrc.slices.DefaultPartitioningPolicy; +import org.xtreemfs.mrc.slices.SliceInfo; +import org.xtreemfs.mrc.slices.SliceManager; +import org.xtreemfs.mrc.slices.VolumeInfo; +import org.xtreemfs.mrc.utils.Converter; +import org.xtreemfs.mrc.utils.MessageUtils; + +/** + * An implementation of the MRC interface. 
+ * + * @author stender + * + */ +public class Brain { + + protected enum AccessMode { + r, w, x, a, ga, c, t, sr, d + } + + protected enum SysAttrs { + locations, + file_id, + object_type, + url, + owner, + group, + default_sp, + ac_policy_id, + osdsel_policy_id, + osdsel_policy_args, + read_only, + free_space + } + + public static final int FILETYPE_NOTEXIST = 0; + + public static final int FILETYPE_DIR = 1; + + public static final int FILETYPE_FILE = 2; + + public static final String DEFAULT_STRIPING_POLICY = "PLAIN"; + + private final InetSocketAddress dirService; + + private FileAccessManager faMan; + + private OSDStatusManager osdMan; + + private BrainRequestListener requestListener; + + private final SliceManager sliceMan; + + private final DIRClient dirClient; + + private final boolean updateATime; + + private final MRCConfig config; + + private final String authString; + + public Brain(MRCConfig config, DIRClient client, OSDStatusManager osdMan, SliceManager slices, + PolicyContainer policyContainer, String authString) throws BrainException { + + try { + + this.dirClient = client; + this.sliceMan = slices; + this.osdMan = osdMan; + this.config = config; + this.authString = authString; + + dirService = config.getDirectoryService(); + updateATime = !config.isNoAtime(); + faMan = new FileAccessManager(sliceMan, policyContainer); + + } catch (Exception e) { + throw new BrainException(e); + } + } + + public void getProtocolVersion(MRCRequest request, List proposedVersions) + throws BrainException { + + long result = VersionManagement.getMatchingProtVers(proposedVersions); + if (result == -1) + throw new BrainException("No matching protocol version found. 
Server supports " + + VersionManagement.getSupportedProtVersAsString()); + + MessageUtils.marshallResponse(request, result); + this.notifyRequestListener(request); + } + + public void createVolume(MRCRequest request, String volumeName) throws BrainException, + UserException { + + HashMap simpleSP = new HashMap(); + simpleSP.put("policy", "RAID0"); + simpleSP.put("stripe-size", Long.valueOf(64)); + simpleSP.put("width", Long.valueOf(1)); + + createVolume(request, volumeName, RandomSelectionPolicy.POLICY_ID, simpleSP, + YesToAnyoneFileAccessPolicy.POLICY_ID, DefaultPartitioningPolicy.POLICY_ID, null); + } + + public void createVolume(MRCRequest request, String volumeName, long osdSelectionPolicyId, + Map defaultStripingPolicy, long acPolicyId, long partitioningPolicyId, + Map acl) throws BrainException, UserException { + + // first, check whether the given policies are supported + + if (osdMan.getOSDSelectionPolicy(osdSelectionPolicyId) == null) + throw new UserException(ErrNo.EINVAL, "invalid OSD selection policy ID: " + + osdSelectionPolicyId); + + if (faMan.getFileAccessPolicy(acPolicyId) == null) + throw new UserException(ErrNo.EINVAL, "invalid file access policy ID: " + acPolicyId); + + // in order to allow volume creation in a single-threaded non-blocking + // manner, it needs to be performed in two steps: + // * first, the volume is registered with the directory service needs + // * when registration has been confirmed at the directory service, + // request processing is continued with createVolumeStep2 + + try { + + String volumeId = SliceManager.generateNewVolumeId(); + VolumeInfo vol = new VolumeInfo(volumeId, volumeName, acPolicyId, osdSelectionPolicyId, + partitioningPolicyId, true); + + request.details.context = new HashMap(); + request.details.context.put("nextMethod", "createVolumeStep2"); + request.details.context.put("volumeInfo", vol); + request.details.context.put("defaultStripingPolicy", defaultStripingPolicy); + request.details.context.put("acl", 
acl); + request.details.context.put("volumeId", volumeId); + + Map queryMap = new HashMap(); + queryMap.put("name", volumeName); + List attrs = new LinkedList(); + attrs.add("version"); + + List args = new LinkedList(); + args.add(queryMap); + args.add(attrs); + + // check whether a volume with the same name has already been + // registered at the Directory Service + BrainHelper.submitRequest(this, request, dirService, "getEntities", args, authString); + + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void createVolumeStep2(MRCRequest request) throws BrainException { + + try { + + // check the response; if a volume with the same name has already + // been registered, throw an exception + Map> response = (Map>) BrainHelper + .parseResponse(request.sr); + + // check if the volume already exists + if (!response.isEmpty()) { + + String uuid = response.keySet().iterator().next(); + throw new UserException(ErrNo.EEXIST, "volume '" + + request.details.context.get("volumeName") + + "' already exists in Directory Service, id='" + uuid + "'"); + } + + // otherwise, register the volume at the Directory Service + request.details.context.put("nextMethod", "createVolumeStep3"); + + VolumeInfo vol = (VolumeInfo) request.details.context.get("volumeInfo"); + Map dsVolumeInfo = BrainHelper.createDSVolumeInfo(vol, osdMan, config + .getUUID().toString()); + + List args = new LinkedList(); + args.add(vol.getId()); + args.add(dsVolumeInfo); + args.add(0L); + + BrainHelper + .submitRequest(this, request, dirService, "registerEntity", args, authString); + + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void createVolumeStep3(MRCRequest request) throws BrainException { + + try { + + // check whether an exception has occured; if so, an exception is + // thrown when trying to parse the response + BrainHelper.parseResponse(request.sr); + + // get arguments from context + VolumeInfo vol = (VolumeInfo) 
request.details.context.get("volumeInfo"); + Map defaultStripingPolicy = (Map) request.details.context + .get("defaultStripingPolicy"); + Map acl = (Map) request.details.context.get("acl"); + + // perform the local volume creation + createVolumeLocally(request, vol.getName(), vol.getId(), vol.getOsdPolicyId(), + defaultStripingPolicy, vol.getAcPolicyId(), vol.getPartitioningPolicyId(), acl); + + // prepare the request for the log replay + List args = new ArrayList(2); + args.add(vol.getName()); + args.add(vol.getId()); + args.add(vol.getOsdPolicyId()); + args.add(defaultStripingPolicy); + args.add(vol.getAcPolicyId()); + args.add(vol.getPartitioningPolicyId()); + args.add(acl); + ReusableBuffer body = ReusableBuffer.wrap(JSONParser.writeJSON(args).getBytes( + HTTPUtils.ENC_UTF8)); + + request.getPinkyRequest().setURIAndBody("createVolumeLocally", body); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (Exception exc) { + // FIXME: roll back DIR registration + throw new BrainException(exc); + } + } + + public void createVolumeLocally(MRCRequest request, String volumeName, String volumeId, + long osdSelectionPolicyId, Map defaultStripingPolicy, long acPolicyId, + long partitioningPolicyId, Map acl) throws BrainException, UserException { + + try { + + // create the volume + VolumeInfo volume = sliceMan.createVolume(volumeId, volumeName, acPolicyId, + osdSelectionPolicyId, null, partitioningPolicyId, true, true); + + // if no volume ACL has been set, use the default volume ACL + // returned by the access control manager + if (acl == null) + acl = faMan.createDefaultVolumeACL(volume.getId()); + + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), "/", 'r'); + + // set slice to ONLINE + SliceInfo info = sliceMan.getSliceInfo(sMan.getSliceId()); + assert (info != null); + info.setStatus(SliceInfo.SliceStatus.ONLINE); + + // set the volume ACL + 
request.details.authorized = true; + request.details.sliceId = sMan.getSliceId(); + doChangeOwner(request, volumeName, request.details.userId, request.details.groupIds + .get(0), false); + doSetACLEntries(request, volumeName, acl, false); + doSetStripingPolicy(request, volumeName, defaultStripingPolicy, false); + + } catch (Exception exc) { + throw new BrainException(exc); + } + + } + + public void deleteVolume(MRCRequest request, String name) throws BrainException, UserException { + + try { + + // check whether the volume exists locally + VolumeInfo volume = sliceMan.getVolumeByName(name); + + // check whether privileged permissions are granted for deleting the + // volume + faMan.checkPrivilegedPermissions(volume.getId(), 1, request.details.userId, + request.details.superUser, request.details.groupIds); + + request.details.context = new HashMap(); + request.details.context.put("nextMethod", "deleteVolumeStep2"); + request.details.context.put("volume", volume); + + List args = new LinkedList(); + args.add(volume.getId()); + + BrainHelper.submitRequest(this, request, dirService, "deregisterEntity", args, + authString); + + } catch (UserException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void deleteVolumeStep2(MRCRequest request) throws BrainException, UserException { + + try { + + // check whether an exception has occured; if so, an exception is + // thrown when trying to parse the response + BrainHelper.parseResponse(request.sr); + + VolumeInfo volume = (VolumeInfo) request.details.context.get("volume"); + + deleteVolumeLocally(request, volume.getName()); + + request.details.sliceId = volume.getSlices().iterator().next().sliceID; + request.getPinkyRequest().requestURI = "deleteVolumeLocally"; + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public 
void deleteVolumeLocally(MRCRequest request, String volumeName) throws BrainException { + + try { + // delete the volume from the local slice manager + sliceMan.deleteVolume(volumeName); + + } catch (Exception exc) { + throw new BrainException(exc); + } + + } + + public void createDir(MRCRequest request, String dirPath) throws UserException, BrainException { + createDir(request, dirPath, null, 511L); + } + + public void createDir(MRCRequest request, String dirPath, Map xAttrs, long mode) + throws UserException, BrainException { + + try { + + Path p = new Path(dirPath); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), + request.syncPseudoRequest ? '*' : 'w'); + + // find the parent directory + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath(), true); + + if (!request.details.authorized) { + + // check whether the parent directory is searchable + faMan.checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + + // check whether the parent directory grants write access + faMan.checkPermission(FileAccessManager.WRITE_ACCESS, volume.getId(), parentDir + .getId(), 0, request.details.userId, request.details.superUser, + request.details.groupIds); + } + + if (sMan.fileExists(parentDir.getId(), p.getLastPathComponent())) + throw new UserException(ErrNo.EEXIST, "file or directory '" + dirPath + + "' already exists"); + + // convert the given access mode to an ACL + Map acl = faMan.convertToACL(volume.getId(), mode); + + // create the metadata object + long fileId = sMan.createFile(null, request.details.userId, request.details.groupIds + .get(0), null, true, acl); + + // link the metadata object to the given parent directory + sMan.linkFile(p.getLastPathComponent(), fileId, parentDir.getId()); + + // create the extended attributes + sMan.addXAttributes(fileId, xAttrs); + + // 
update POSIX timestamps of parent directory + sMan.updateFileTimes(parentDir.getId(), false, true, true); + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void createFile(MRCRequest request, String filePath) throws UserException, + BrainException { + createFile(request, filePath, null, null, 511L, false, null); + } + + public void createFile(MRCRequest request, String filePath, Map xAttrs, + Map stripingPolicy, long mode) throws UserException, BrainException { + createFile(request, filePath, xAttrs, stripingPolicy, mode, false, null); + } + + public void createFile(MRCRequest request, String filePath, Map xAttrs, + Map stripingPolicy, long mode, boolean open) throws UserException, + BrainException { + createFile(request, filePath, xAttrs, stripingPolicy, mode, open, null); + } + + public void createFile(MRCRequest request, String filePath, Map xAttrs, + Map stripingPolicy, long mode, boolean open, List assignedXLocList) + throws UserException, BrainException { + + try { + + Path p = new Path(filePath); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), + request.syncPseudoRequest ? 
'*' : 'w'); + + // find the parent directory + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath(), true); + + if (!request.details.authorized) { + + // check whether the parent directory is searchable + faMan.checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + + // check whether the parent directory grants write access + faMan.checkPermission(FileAccessManager.WRITE_ACCESS, volume.getId(), parentDir + .getId(), 0, request.details.userId, request.details.superUser, + request.details.groupIds); + } + + if (p.getPathWithoutVolume().length() == 0 + || sMan.fileExists(parentDir.getId(), p.getLastPathComponent())) + throw new UserException(ErrNo.EEXIST, "file or directory '" + filePath + + "' already exists"); + + // derive the ACL for the file in accordance with the volume's file + // access policy + Map acl = faMan.convertToACL(volume.getId(), mode); + + // create the metadata object + long fileId = sMan.createFile(null, request.details.userId, request.details.groupIds + .get(0), stripingPolicy, false, acl); + + // link the metadata object to the given parent directory + sMan.linkFile(p.getLastPathComponent(), fileId, parentDir.getId()); + + // create the user attributes + sMan.addXAttributes(fileId, xAttrs); + + HTTPHeaders headers = null; + + if (open) { + // create a capability for O_CREAT open calls + String capability = BrainHelper.createCapability(AccessMode.w.toString(), + volume.getId(), fileId, 0, config.getCapabilitySecret()).toString(); + + XLocationsList xLocList = null; + if (assignedXLocList == null) { + // assign a new list + xLocList = BrainHelper.createXLocList(null, sMan, osdMan, p, fileId, parentDir + .getId(), volume, request.getPinkyRequest().getClientAddress()); + } else { + // log replay, use assigned list + xLocList = Converter.listToXLocList(assignedXLocList); + } + + // assign the OSDs + 
sMan.setXLocationsList(sMan.getFileEntity(p.getPathWithoutVolume()).getId(), + xLocList); + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "assigned xloc list to " + p + + ": " + xLocList); + + headers = BrainHelper.createXCapHeaders(capability, xLocList); + + if (assignedXLocList == null) { + // not necessary when in log replay mode! + // rewrite body + // prepare the request for the log replay + List args = new ArrayList(5); + args.add(filePath); + args.add(xAttrs); + args.add(stripingPolicy); + args.add(mode); + args.add(true); + args.add(Converter.xLocListToList(xLocList)); + + ReusableBuffer body = ReusableBuffer.wrap(JSONParser.writeJSON(args).getBytes( + HTTPUtils.ENC_UTF8)); + + request.getPinkyRequest().setURIAndBody("createFile", body); + } + } + + // update POSIX timestamps of parent directory + sMan.updateFileTimes(parentDir.getId(), false, true, true); + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null, headers); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + throw exc; + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + throw new BrainException(exc); + } + } + + public void createSymbolicLink(MRCRequest request, String linkPath, String targetPath) + throws UserException, BrainException { + + try { + + Path p = new Path(linkPath); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), + request.syncPseudoRequest ? 
'*' : 'w'); + + // find the parent directory + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath(), true); + + if (!request.details.authorized) { + + // check whether the parent directory is searchable + faMan.checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + + // check whether the parent directory grants write access + faMan.checkPermission(FileAccessManager.WRITE_ACCESS, volume.getId(), parentDir + .getId(), 0, request.details.userId, request.details.superUser, + request.details.groupIds); + } + + if (sMan.fileExists(parentDir.getId(), p.getLastPathComponent())) + throw new UserException(ErrNo.EEXIST, "file '" + linkPath + "' already exists"); + + // TODO: check whether the target path refers to a file or a + // directory + boolean isDirectory = false; + + // create an ACL with all permissions for anyone + Map acl = faMan.convertToACL(volume.getId(), 0777); + + // create the metadata object + long fileId = sMan.createFile(targetPath, request.details.userId, + request.details.groupIds.get(0), null, isDirectory, acl); + + // link the metadata object to the given parent directory + sMan.linkFile(p.getLastPathComponent(), fileId, parentDir.getId()); + + // update POSIX timestamps of parent directory + sMan.updateFileTimes(parentDir.getId(), false, true, true); + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void createLink(MRCRequest request, String linkPath, String targetPath) + throws UserException, BrainException { + + try { + + Path lPath = new Path(linkPath); + Path tPath = new Path(targetPath); + + if (!lPath.getVolumeName().equals(tPath.getVolumeName())) + 
throw new UserException(ErrNo.EXDEV, + "cannot create hard links across volume boundaries"); + + VolumeInfo volume = sliceMan.getVolumeByName(lPath.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), lPath.getPathWithoutVolume(), + request.syncPseudoRequest ? '*' : 'w'); + + // find the parent directory + AbstractFileEntity linkParentDir = sMan.getFileEntity(lPath.getInnerPath(), true); + + if (!request.details.authorized) { + + // check whether the parent directory is searchable + faMan.checkSearchPermission(volume.getId(), lPath.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + + // check whether the parent directory grants write access + faMan.checkPermission(FileAccessManager.WRITE_ACCESS, volume.getId(), linkParentDir + .getId(), 0, request.details.userId, request.details.superUser, + request.details.groupIds); + } + + if (sMan.fileExists(linkParentDir.getId(), lPath.getLastPathComponent())) + throw new UserException(ErrNo.EEXIST, "file '" + linkPath + "' already exists"); + + AbstractFileEntity targetParentDir = sMan.getFileEntity(tPath.getInnerPath()); + + if (!request.details.authorized) { + // check whether the target's parent directory is searchable + faMan.checkSearchPermission(volume.getId(), tPath.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + } + + AbstractFileEntity target = sMan.getChild(tPath.getLastPathComponent(), targetParentDir + .getId()); + if (target instanceof DirEntity) + throw new UserException(ErrNo.EPERM, "no support for links to directories"); + + if (!request.details.authorized) { + // check whether the target grants write access + faMan.checkPermission(FileAccessManager.WRITE_ACCESS, volume.getId(), target + .getId(), targetParentDir.getId(), request.details.userId, + request.details.superUser, request.details.groupIds); + } + + // create the link + sMan.linkFile(lPath.getLastPathComponent(), target.getId(), 
linkParentDir.getId()); + + // update POSIX timestamps + sMan.updateFileTimes(linkParentDir.getId(), false, true, true); + sMan.updateFileTimes(target.getId(), false, true, false); + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void delete(MRCRequest request, String path) throws UserException, BrainException { + + try { + + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), + request.syncPseudoRequest ? '*' : 'w'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath()); + + if (!request.details.authorized) { + + // check whether the parent directory is searchable + faMan.checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + + // check whether the parent directory grants write access + faMan.checkPermission(FileAccessManager.WRITE_ACCESS, volume.getId(), parentDir + .getId(), 0, request.details.userId, request.details.superUser, + request.details.groupIds); + } + + AbstractFileEntity file = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (file == null) + throw new UserException(ErrNo.ENOENT, "file or directory '" + + p.getLastPathComponent() + "' does not exist"); + + if (!request.details.authorized) { + // check whether the entry itself can be deleted (this is e.g. 
+ // important w/ POSIX access control if the sticky bit is set) + faMan.checkPermission(FileAccessManager.RM_MV_IN_DIR_ACCESS, volume.getId(), file + .getId(), parentDir.getId(), request.details.userId, + request.details.superUser, request.details.groupIds); + } + + if (file.isDirectory() && sMan.hasChildren(file.getId())) + throw new UserException(ErrNo.ENOTEMPTY, "'" + path + "' is not empty"); + + HTTPHeaders xCapHeaders = null; + + // unless the file is a directory, retrieve X-headers for file + // deletion on OSDs; if the request was authorized before, + // assume that a capability has been issued already. + if (!request.details.authorized && !file.isDirectory()) { + + // obtain a deletion capability for the file + String aMode = faMan.translateAccessMode(volume.getId(), + FileAccessManager.DELETE_ACCESS); + String capability = BrainHelper.createCapability(aMode, volume.getId(), + file.getId(), Integer.MAX_VALUE, config.getCapabilitySecret()).toString(); + + // set the XCapability and XLocationsList headers + xCapHeaders = BrainHelper.createXCapHeaders(capability, sMan.getXLocationsList(file + .getId())); + } + + // unlink the file; if there are still links to the file, reset the + // X-headers to null, as the file content must not be deleted + long linkCount = sMan.unlinkFile(p.getLastPathComponent(), file.getId(), parentDir + .getId()); + if (linkCount > 0) + xCapHeaders = null; + + // update POSIX timestamps of parent directory + sMan.updateFileTimes(parentDir.getId(), false, true, true); + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null, xCapHeaders); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + /** + * This method has the semantics of the POSIX 'rename' operation. 
It can be + * used to rename or move files and directories inside a volume, across + * volume boundaries or even between MRCs. + * + * In the first step, 'move' checks whether the source path points to an + * existing file or directory. If this is the case, the method checks + * whether the target volume is the same as the source volume. If this is + * not the case, the movement operation itself will be executed by the + * 'interVolumeMove' method. + * + * Note that inter-volume moves may be fairly expensive if large directory + * structures are moved, since the entire directory subtree has to be + * transferred to and restored on the target site. + * + * @param request + * @param sourcePath + * @param targetPath + * @throws UserException + * @throws BrainException + */ + public void move(MRCRequest request, String sourcePath, String targetPath) + throws UserException, BrainException { + + try { + + Path sPath = new Path(sourcePath); + VolumeInfo volume = sliceMan.getVolumeByName(sPath.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), sPath.getPathWithoutVolume(), + 'w'); + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + Path tPath = new Path(targetPath); + + // find out what the source path refers to (1 = directory, 2 = file) + AbstractFileEntity sourceParentDir = sMan.getFileEntity(sPath.getInnerPath()); + + if (sPath.getLastPathComponent() == null) + throw new UserException(ErrNo.ENOENT, "cannot move a volume"); + + if (!tPath.getVolumeName().equals(sPath.getVolumeName())) + throw new UserException(ErrNo.ENOENT, "cannot move between volumes"); + + if (!request.details.authorized) { + + // check whether the parent directory of the source file is + // searchable + faMan.checkSearchPermission(volume.getId(), sPath.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + + // check whether the parent directory of the source file grants + // write 
access + faMan.checkPermission(FileAccessManager.WRITE_ACCESS, volume.getId(), + sourceParentDir.getId(), 0, request.details.userId, request.details.superUser, + request.details.groupIds); + } + + AbstractFileEntity source = sMan.getChild(sPath.getLastPathComponent(), sourceParentDir + .getId()); + if (source == null) + throw new UserException(ErrNo.ENOENT, "file or directory '" + + sPath.getLastPathComponent() + "' does not exist"); + + if (!request.details.authorized) { + // check whether the entry itself can be moved (this is e.g. + // important w/ POSIX access control if the sticky bit is set) + faMan.checkPermission(FileAccessManager.RM_MV_IN_DIR_ACCESS, volume.getId(), source + .getId(), sourceParentDir.getId(), request.details.userId, + request.details.superUser, request.details.groupIds); + } + + int sourceType = source.isDirectory() ? FILETYPE_DIR : FILETYPE_FILE; + + // if the target path refers to a different volume, perform an + // inter-volume move + // if (!tPath.getVolumeName().equals(sPath.getVolumeName())) { + // interVolumeMove(request, sMan, source, tPath, sliceMan + // .hasVolume(tPath.getVolumeName())); + // return; + // } + + // find out what the target path refers to (0 = does not exist, 1 = + // directory, 2 = file) + AbstractFileEntity targetParentDir = sMan.getFileEntity(tPath.getInnerPath(), true); + AbstractFileEntity tChild = sMan.getChild(tPath.getLastPathComponent(), targetParentDir + .getId()); + int targetType = tPath.getPathWithoutVolume().length() == 0 ? FILETYPE_DIR + : tChild == null ? FILETYPE_NOTEXIST : tChild.isDirectory() ? 
FILETYPE_DIR + : FILETYPE_FILE; + + // if both the old and the new directory point to the same + // entity, do nothing + if (sPath.toString().equals(tPath.toString())) { + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + return; + } + + if (!request.details.authorized) { + + // check whether the parent directory of the target file is + // searchable + faMan.checkSearchPermission(volume.getId(), tPath.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + + // check whether the parent directory of the target file + // grants write access + faMan.checkPermission(FileAccessManager.WRITE_ACCESS, volume.getId(), + targetParentDir.getId(), 0, request.details.userId, request.details.superUser, + request.details.groupIds); + } + + HTTPHeaders xCapHeaders = null; + + // if the source is a directory + if (sourceType == FILETYPE_DIR) { + + // check whether the target is a subdirectory of the + // source directory; if so, throw an exception + if (targetPath.startsWith(sourcePath + "/")) + throw new UserException(ErrNo.EINVAL, "cannot move '" + sourcePath + + "' to one of its own subdirectories"); + + switch (targetType) { + + case FILETYPE_NOTEXIST: // target does not exist + { + // relink the metadata object to the parent directory of + // the target path and remove the former link + sMan.linkFile(tPath.getLastPathComponent(), source.getId(), targetParentDir + .getId()); + sMan.unlinkFile(sPath.getLastPathComponent(), source.getId(), sourceParentDir + .getId()); + + break; + } + + case FILETYPE_DIR: // target is a directory + { + // unlink the target directory if existing + + if (!request.details.authorized) { + // check whether the target directory may be overwritten + faMan.checkPermission(FileAccessManager.DELETE_ACCESS, volume.getId(), + tChild.getId(), targetParentDir.getId(), request.details.userId, + request.details.superUser, request.details.groupIds); + } + + if 
(sMan.hasChildren(tChild.getId())) + throw new UserException(ErrNo.ENOTEMPTY, "target directory '" + targetPath + + "' is not empty"); + else + sMan.unlinkFile(tPath.getLastPathComponent(), tChild.getId(), + targetParentDir.getId()); + + // relink the metadata object to the parent directory of + // the target path and remove the former link + sMan.linkFile(tPath.getLastPathComponent(), source.getId(), targetParentDir + .getId()); + sMan.unlinkFile(sPath.getLastPathComponent(), source.getId(), sourceParentDir + .getId()); + + break; + } + + case FILETYPE_FILE: // target is a file + throw new UserException(ErrNo.ENOTDIR, "cannot rename directory '" + sourcePath + + "' to file '" + targetPath + "'"); + + } + + } + + // if the source is a file + else { + + switch (targetType) { + + case FILETYPE_NOTEXIST: // target does not exist + { + + // relink the metadata object to the parent directory of + // the target path and remove the former link + sMan.linkFile(tPath.getLastPathComponent(), source.getId(), targetParentDir + .getId()); + sMan.unlinkFile(sPath.getLastPathComponent(), source.getId(), sourceParentDir + .getId()); + + break; + } + + case FILETYPE_DIR: // target is a directory + { + throw new UserException(ErrNo.EISDIR, "cannot rename file '" + sourcePath + + "' to directory '" + targetPath + "'"); + } + + case FILETYPE_FILE: // target is a file + { + + if (!request.details.authorized) { + + // obtain a deletion capability for the file + String aMode = faMan.translateAccessMode(volume.getId(), + FileAccessManager.DELETE_ACCESS); + String capability = BrainHelper.createCapability(aMode, volume.getId(), + tChild.getId(), Integer.MAX_VALUE, config.getCapabilitySecret()) + .toString(); + + // set the XCapability and XLocationsList headers + xCapHeaders = BrainHelper.createXCapHeaders(capability, sMan + .getXLocationsList(tChild.getId())); + } + + // unlink the target file + long linkCount = sMan.unlinkFile(tPath.getLastPathComponent(), tChild.getId(), + 
targetParentDir.getId()); + + // reset the x-header to null if there is still another link + // to the metadata object, i.e. the metadata object must not + // be deleted yet + if (linkCount > 0) + xCapHeaders = null; + + // relink the metadata object to the parent directory of + // the target path and remove the former link + sMan.linkFile(tPath.getLastPathComponent(), source.getId(), targetParentDir + .getId()); + sMan.unlinkFile(sPath.getLastPathComponent(), source.getId(), sourceParentDir + .getId()); + + break; + } + + } + } + + // update POSIX timestamps of parent directories + sMan.updateFileTimes(sourceParentDir.getId(), false, true, true); + sMan.updateFileTimes(targetParentDir.getId(), false, true, true); + + MessageUtils.marshallResponse(request, null, xCapHeaders); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + /** + * Restore the MetaData for the given fileID. + * + * @param request + * @param filePath + * @param fileNumber + * @param fileSize + * @param xAttrs + * @param osd + * @param objectSize + * @param volumeID + * + * @throws UserException + * @throws BrainException + */ + public void restoreFile(MRCRequest request, String filePath, long fileNumber, long fileSize, + Map xAttrs, String osd, long objectSize, String volumeID) throws UserException, + BrainException { + try { + VolumeInfo volume = getVolumeData(volumeID); + String path = volume.getName()+"/"+filePath; + Path p = new Path(path); + + StorageManager sMan = sliceMan.getSliceDB(volumeID, p.getPathWithoutVolume(), + request.syncPseudoRequest ? 
'*' : 'w'); + + // find the parent directory + AbstractFileEntity parentDir = sMan.getFileEntity("/", true); + + // derive the ACL for the file in accordance with the volume's file + // access policy + Map acl = faMan.convertToACL(volumeID, 511L); + + if (!request.details.authorized) { + + // check whether the parent directory is searchable + faMan.checkSearchPermission(volumeID, "/", + request.details.userId, request.details.superUser, request.details.groupIds); + + // check whether the parent directory grants write access + faMan.checkPermission(FileAccessManager.WRITE_ACCESS, volumeID, parentDir + .getId(), 0, request.details.userId, request.details.superUser, + request.details.groupIds); + } + + long lostFoundID = 0L; + try{ + lostFoundID = sMan.getFileEntity(filePath).getId(); + }catch (UserException ue){ + // create lost and found DIR, if necessary + lostFoundID = sMan.createFile(null, request.details.userId, request.details.groupIds + .get(0), null, true, acl); + + // link the metadata object to the given parent directory + sMan.linkFile(filePath, lostFoundID, parentDir.getId()); + } + + long size = (objectSize<1024L ? 1L : (objectSize % 1024L != 0L) ? 
objectSize/1024L+1L : objectSize/1024L); + + // make a new xlocl + XLocationsList xloc = new XLocationsList(new XLocation[] { new XLocation( + new StripingPolicy("RAID0", size, 1L), (new String[] { osd })) }, 0L); + + // generate the metadata + long time = System.currentTimeMillis(); + AbstractFileEntity file = new FileEntity(fileNumber, request.details.userId, + request.details.groupIds.get(0), time, time, time, fileSize, xloc, Converter + .mapToACL(acl), 0L, 0L, 0L); + + // create the metadata object + sMan.createFile(file, null); + + // link the metadata object to the given parent directory + sMan.linkFile(volumeID + ":" + fileNumber, fileNumber, lostFoundID); + + // create the user attributes + sMan.addXAttributes(fileNumber, xAttrs); + + // log entry + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + // return + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (BackendException e) { + Logging.logMessage(Logging.LEVEL_ERROR, this, e); + throw new BrainException(e); + } + } + + public void stat(MRCRequest request, String path, boolean inclReplicas, boolean inclXAttrs, + boolean inclACLs) throws UserException, BrainException { + + try { + + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan + .getSliceDB(volume.getId(), p.getPathWithoutVolume(), 'r'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath()); + + // check whether the parent directory of the file grants search + // access + if (p.getLastPathComponent().length() != 0) + faMan.checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + + AbstractFileEntity file = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (file == null) + throw new UserException(ErrNo.ENOENT, "file or directory '" + + p.getLastPathComponent() + "' does not 
exist"); + + String ref = sMan.getFileReference(file.getId()); + + XLocationsList xLocList = file instanceof FileEntity && inclReplicas ? ((FileEntity) file) + .getXLocationsList() + : null; + + Map xAttrs = null; + if (inclXAttrs) { + xAttrs = sMan.getXAttributes(file.getId()); + if (xAttrs == null) + xAttrs = new HashMap(); + for (SysAttrs attr : SysAttrs.values()) { + String key = "xtreemfs." + attr.toString(); + Object value = BrainHelper.getSysAttrValue(config, sMan, osdMan, volume, p, + file, attr.toString()); + if (!value.equals("")) + xAttrs.put(key, value); + } + } + + ACLEntry[] acl = inclACLs ? file.getAcl() : null; + + Object statInfo = BrainHelper.createStatInfo(faMan, file, ref, volume.getId(), + request.details.userId, request.details.groupIds, xLocList, xAttrs, acl); + + MessageUtils.marshallResponse(request, statInfo); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void getXAttr(MRCRequest request, String path, String attrKey) throws UserException, + BrainException { + + try { + + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan + .getSliceDB(volume.getId(), p.getPathWithoutVolume(), 'r'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath()); + + // check whether the parent directory of the file grants search + // access + if (p.getLastPathComponent().length() != 0) + faMan.checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + + AbstractFileEntity file = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (file == null) + throw new UserException(ErrNo.ENOENT, "file or directory '" + + p.getLastPathComponent() + "' does not exist"); + + String value = null; + + if (attrKey.startsWith("xtreemfs.")) + 
value = BrainHelper.getSysAttrValue(config, sMan, osdMan, volume, p, file, attrKey + .substring(9)); + else { + value = String.valueOf(sMan.getXAttributes(file.getId()).get(attrKey)); + } + + if (value == null) + value = ""; + + MessageUtils.marshallResponse(request, value); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + + } + + public void readDir(MRCRequest request, String path) throws UserException, BrainException { + + try { + + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan + .getSliceDB(volume.getId(), p.getPathWithoutVolume(), 'r'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath(), true); + + // check whether the parent directory is searchable + if (p.getLastPathComponent().length() != 0) + faMan.checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + + AbstractFileEntity dir = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (dir == null) + throw new UserException(ErrNo.ENOENT, "file or directory '" + + p.getLastPathComponent() + "' does not exist"); + + // check whether the directory grants read access + faMan.checkPermission(FileAccessManager.READ_ACCESS, volume.getId(), dir.getId(), + parentDir.getId(), request.details.userId, request.details.superUser, + request.details.groupIds); + + // update POSIX timestamps + if (updateATime) + sMan.updateFileTimes(dir.getId(), true, false, false); + + MessageUtils.marshallResponse(request, sMan.getChildren(dir.getId())); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void readDirAndStat(MRCRequest request, 
String path) throws UserException, + BrainException { + + try { + + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan + .getSliceDB(volume.getId(), p.getPathWithoutVolume(), 'r'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath(), true); + + // check whether the directory is searchable + if (p.getLastPathComponent().length() != 0) + faMan.checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + + AbstractFileEntity dir = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (dir == null) + throw new UserException(ErrNo.ENOENT, "file or directory '" + + p.getLastPathComponent() + "' does not exist"); + + // check whether the directory grants read access + faMan.checkPermission(FileAccessManager.READ_ACCESS, volume.getId(), dir.getId(), + parentDir.getId(), request.details.userId, request.details.superUser, + request.details.groupIds); + + Map fileData = sMan.getChildData(dir.getId()); + + Map> result = new HashMap>(); + for (String name : fileData.keySet()) { + AbstractFileEntity data = fileData.get(name); + String ref = sMan.getFileReference(data.getId()); + result.put(name, BrainHelper.createStatInfo(faMan, data, ref, volume.getId(), + request.details.userId, request.details.groupIds, null, null, null)); + } + + // update POSIX timestamps + if (updateATime) + sMan.updateFileTimes(dir.getId(), true, false, false); + + MessageUtils.marshallResponse(request, result); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void open(MRCRequest request, String path, String accessMode) throws BrainException, + UserException { + + try { + + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + 
StorageManager sMan = sliceMan + .getSliceDB(volume.getId(), p.getPathWithoutVolume(), 'r'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath(), true); + + // check whether the parent directory of the source file is + // searchable + faMan.checkSearchPermission(volume.getId(), p.getInnerPath(), request.details.userId, + request.details.superUser, request.details.groupIds); + + AbstractFileEntity file = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (file == null) + throw new UserException(ErrNo.ENOENT, "file '" + p.getLastPathComponent() + + "' does not exist"); + + // if the file refers to a symbolic link, resolve the link + String target = sMan.getFileReference(file.getId()); + if (target != null) { + path = target; + p = new Path(path); + + // if the local MRC is not responsible, send a redirect + if (!sliceMan.hasVolume(p.getVolumeName())) { + MessageUtils.setRedirect(request, target); + this.notifyRequestListener(request); + return; + } + + volume = sliceMan.getVolumeByName(p.getVolumeName()); + sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), 'r'); + + file = sMan.getFileEntity(p.getPathWithoutVolume()); + } + + if (file.isDirectory()) + throw new UserException(ErrNo.EISDIR, "open is restricted to files"); + + AccessMode mode = null; + try { + mode = AccessMode.valueOf(accessMode); + } catch (IllegalArgumentException exc) { + throw new UserException(ErrNo.EINVAL, "invalid access mode for 'open': " + + accessMode); + } + + // get the current epoch, use (and increase) the truncate number if + // the open mode is truncate + long epochNo; + FileEntity fileAsFile = (FileEntity) file; + if (mode == AccessMode.t) { + epochNo = fileAsFile.getIssuedEpoch() + 1; + setTruncateEpoch(request, volume.getId(), fileAsFile.getId(), epochNo); + } else + epochNo = fileAsFile.getEpoch(); + + // create the capability; return if the operation fails + String capability = null; + try { + + // check whether the file is marked 
as 'read-only'; in this + // case, throw an exception if write access is requested + if ((mode == AccessMode.w || mode == AccessMode.a || mode == AccessMode.ga || mode == AccessMode.t) + && sMan.isReadOnly(file.getId())) + throw new UserException(ErrNo.EPERM, "read-only files cannot be written"); + + // check whether the permission is granted + faMan.checkPermission(accessMode, volume.getId(), file.getId(), 0, + request.details.userId, request.details.superUser, request.details.groupIds); + + capability = BrainHelper.createCapability(accessMode, volume.getId(), file.getId(), + epochNo, config.getCapabilitySecret()).toString(); + + } catch (UserException exc) { + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + return; + } + + // get the list of replicas associated with the file + XLocationsList xLocList = sMan.getXLocationsList(file.getId()); + + // if no replica exists yet, create one using the default striping + // policy together with a set of feasible OSDs from the OSD status + // manager + if (xLocList == null || xLocList.getReplicas() == null) { + + xLocList = BrainHelper.createXLocList(xLocList, sMan, osdMan, p, file.getId(), + parentDir.getId(), volume, request.getPinkyRequest().getClientAddress()); + + assignOSDs(request, volume.getId(), p.getPathWithoutVolume(), Converter + .xLocListToList(xLocList)); + } + + HTTPHeaders headers = BrainHelper.createXCapHeaders(capability, xLocList); + + // update POSIX timestamps + if (updateATime) + sMan.updateFileTimes(file.getId(), true, false, false); + + MessageUtils.marshallResponse(request, null, headers); + this.notifyRequestListener(request); + + } catch (UserException e) { + throw e; + } catch (BrainException e) { + throw e; + } catch (Exception e) { + throw new BrainException(e); + } + } + + public void renew(MRCRequest request) throws UserException, BrainException { + + try { + + String capString = request.getPinkyRequest().requestHeaders + 
.getHeader(HTTPHeaders.HDR_XCAPABILITY); + String newsizeString = request.getPinkyRequest().requestHeaders + .getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + + if (capString == null) + throw new UserException("missing " + HTTPHeaders.HDR_XCAPABILITY + " header"); + + Capability cap = new Capability(capString, config.getCapabilitySecret()); + + // check whether the received capability has a valid signature + if (!cap.isValid()) + throw new UserException(capString + " is invalid"); + + // if an X-NewFileSize header has been sent, update the file size if + // necessary ... + if (newsizeString != null) { + + // parse volume and file ID from global file ID + String globalFileId = cap.getFileId(); + int i = globalFileId.indexOf(':'); + String volumeId = cap.getFileId().substring(0, i); + long fileId = Long.parseLong(cap.getFileId().substring(i + 1)); + + StorageManager sMan = sliceMan.getSliceDB(volumeId, fileId, 'r'); + + // if the file still exists, update the file size + if (sMan.getFileEntity(fileId) != null) { + + setFileSize(request, volumeId, fileId, newsizeString); + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + } + } + + Capability newCap = new Capability(cap.getFileId(), cap.getAccessMode(), cap + .getEpochNo(), config.getCapabilitySecret()); + + HTTPHeaders headers = BrainHelper.createXCapHeaders(newCap.toString(), null); + + MessageUtils.marshallResponse(request, null, headers); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + /** + * Look up the files identified by volumeID and fileID. 
+ * + * @param request + * + * @param volumeID + * + * @param fileIDs + */ + public void checkFileList(MRCRequest request, String volumeID, List fileIDs) + throws BrainException { + String response = ""; + + try { + if (fileIDs == null || fileIDs.size() == 0) + throw new BackendException("fileList was empty!"); + for (String fileID : fileIDs) { + if (fileID == null) + throw new BackendException("fileID was null!"); + response += (sliceMan.exists(volumeID, fileID.toString())) ? "1" : "0"; + } + } catch (UserException ue) { + response = "2"; + } catch (BackendException be) { + throw new BrainException("checkFileList caused an Exception: " + be.getMessage()); + } + // send an answer to the osd + MessageUtils.marshallResponse(request, response); + this.notifyRequestListener(request); + } + + public void updateFileSize(MRCRequest request) throws BrainException { + + try { + + String capString = request.getPinkyRequest().requestHeaders + .getHeader(HTTPHeaders.HDR_XCAPABILITY); + String newsizeString = request.getPinkyRequest().requestHeaders + .getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + + if (capString == null) + throw new UserException("missing " + HTTPHeaders.HDR_XCAPABILITY + " header"); + if (newsizeString == null) + throw new UserException("missing " + HTTPHeaders.HDR_XNEWFILESIZE + " header"); + + // create a capability object to verify the capability + Capability cap = new Capability(capString, config.getCapabilitySecret()); + + // check whether the received capability has a valid signature + if (!cap.isValid()) + throw new UserException(capString + " is invalid"); + + // parse volume and file ID from global file ID + String globalFileId = cap.getFileId(); + int i = globalFileId.indexOf(':'); + String volumeId = cap.getFileId().substring(0, i); + long fileId = Long.parseLong(cap.getFileId().substring(i + 1)); + StorageManager sMan = sliceMan.getSliceDB(volumeId, fileId, 'r'); + + setFileSize(request, volumeId, fileId, newsizeString); + + request.details.sliceId = 
sMan.getSliceId(); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null, null); + this.notifyRequestListener(request); + + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void setFileSize(MRCRequest request, String volumeId, long fileId, String newsizeString) + throws UserException, BrainException { + + try { + // prepare the request for the log replay + List args = new ArrayList(3); + args.add(volumeId); + args.add(fileId); + args.add(newsizeString); + ReusableBuffer body = ReusableBuffer.wrap(JSONParser.writeJSON(args).getBytes( + HTTPUtils.ENC_UTF8)); + request.getPinkyRequest().setURIAndBody("setFileSize", body); + + StorageManager sMan = sliceMan.getSliceDB(volumeId, fileId, 'w'); + + BrainHelper.updateFileSize(sMan, updateATime, volumeId, fileId, newsizeString); + + } catch (UserException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void setTruncateEpoch(MRCRequest request, String volumeId, long fileId, + long truncateEpoch) throws BrainException { + + try { + // prepare the request for the log replay + List args = new ArrayList(3); + args.add(volumeId); + args.add(fileId); + args.add(truncateEpoch); + ReusableBuffer body = ReusableBuffer.wrap(JSONParser.writeJSON(args).getBytes( + HTTPUtils.ENC_UTF8)); + request.getPinkyRequest().setURIAndBody("setTruncateEpoch", body); + + StorageManager sMan = sliceMan.getSliceDB(volumeId, fileId, 'w'); + FileEntity file = (FileEntity) sMan.getFileEntity(fileId); + sMan.setFileSize(fileId, file.getSize(), file.getEpoch(), truncateEpoch); + + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void assignOSDs(MRCRequest request, String volumeId, String path, List xLocList) + throws BrainException { + + try { + // prepare the request for the log replay + List args = new ArrayList(3); + args.add(volumeId); + args.add(path); + args.add(xLocList); + ReusableBuffer body = 
ReusableBuffer.wrap(JSONParser.writeJSON(args).getBytes( + HTTPUtils.ENC_UTF8)); + + // assign the OSDs + StorageManager sMan = sliceMan.getSliceDB(volumeId, path, + request.syncPseudoRequest ? '*' : 'w'); + sMan.setXLocationsList(sMan.getFileEntity(path).getId(), Converter + .listToXLocList(xLocList)); + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "assigned xloc list to " + path + + ": " + xLocList); + + request.getPinkyRequest().setURIAndBody("assignOSDs", body); + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void setDefaultStripingPolicy(MRCRequest request, String path, + Map stripingPolicy) throws BrainException { + + try { + + doSetStripingPolicy(request, path, stripingPolicy, true); + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (Exception exc) { + throw new BrainException(exc); + } + + } + + public void getDefaultStripingPolicy(MRCRequest request, String path) throws UserException, + BackendException, BrainException { + + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), + request.syncPseudoRequest ? 
'*' : 'w'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath(), true); + + if (!request.details.authorized) { + // check whether the parent directory of the file grants search + // access + if (p.getLastPathComponent().length() != 0) + faMan.checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + } + + AbstractFileEntity dir = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (dir == null) + throw new UserException(ErrNo.ENOENT, "directory '" + p.getLastPathComponent() + + "' does not exist"); + + // if the directory refers to a symbolic link, resolve the link + String target = sMan.getFileReference(dir.getId()); + if (target != null) { + path = target; + p = new Path(path); + + // if the local MRC is not responsible, send a redirect + if (!sliceMan.hasVolume(p.getVolumeName())) { + MessageUtils.setRedirect(request, target); + this.notifyRequestListener(request); + return; + } + + volume = sliceMan.getVolumeByName(p.getVolumeName()); + sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), 'r'); + + dir = sMan.getFileEntity(p.getPathWithoutVolume()); + } + + if (!dir.isDirectory()) + throw new UserException(ErrNo.ENOTDIR, path + " does not point to a directory"); + + Map sp = Converter.stripingPolicyToMap(sMan.getStripingPolicy(dir.getId())); + + MessageUtils.marshallResponse(request, sp); + this.notifyRequestListener(request); + } + + public void changeAccessMode(MRCRequest request, String path, long mode) throws UserException, + BrainException { + + try { + + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), + request.syncPseudoRequest ? 
'*' : 'w'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath()); + + if (!request.details.authorized) { + // check whether the parent directory of the file grants search + // access + if (p.getLastPathComponent().length() != 0) + faMan + .checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, + request.details.groupIds); + } + + AbstractFileEntity file = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (file == null) + throw new UserException(ErrNo.ENOENT, "file or directory '" + + p.getLastPathComponent() + "' does not exist"); + + if (!request.details.authorized) { + // check whether the access mode may be changed + faMan.checkPrivilegedPermissions(volume.getId(), file.getId(), + request.details.userId, request.details.superUser, request.details.groupIds); + } + + // change the access mode + faMan.setPosixAccessMode(volume.getId(), file.getId(), request.details.userId, + request.details.groupIds, mode); + + // update POSIX timestamps + sMan.updateFileTimes(file.getId(), false, true, false); + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (UserException e) { + throw e; + } catch (BrainException e) { + throw e; + } catch (Exception e) { + throw new BrainException(e); + } + } + + public void changeOwner(MRCRequest request, String path, String userId, String groupId) + throws UserException, BrainException { + + try { + + doChangeOwner(request, path, userId, groupId, true); + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (Exception exc) { + throw new BrainException(exc); + } + + } + + public void setACLEntries(MRCRequest request, String path, Map aclEntries) + throws BrainException, UserException { + + try { + + doSetACLEntries(request, path, aclEntries, true); + + 
MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (UserException e) { + throw e; + } catch (BrainException e) { + throw e; + } catch (Exception e) { + throw new BrainException(e); + } + + } + + public void removeACLEntries(MRCRequest request, String path, List entities) + throws UserException, BrainException { + + try { + + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), + request.syncPseudoRequest ? '*' : 'w'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath()); + + if (!request.details.authorized) { + // check whether the parent directory of the file grants search + // access + if (p.getLastPathComponent().length() != 0) + faMan + .checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, + request.details.groupIds); + } + + AbstractFileEntity file = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (file == null) + throw new UserException(ErrNo.ENOENT, "file or directory '" + + p.getLastPathComponent() + "' does not exist"); + + if (!request.details.authorized) { + // check whether the access mode may be changed + faMan.checkPrivilegedPermissions(volume.getId(), file.getId(), + request.details.userId, request.details.superUser, request.details.groupIds); + } + + // change the ACL + faMan.removeACLEntries(volume.getId(), file.getId(), request.details.userId, + request.details.groupIds, entities); + + // update POSIX timestamps + sMan.updateFileTimes(file.getId(), false, true, false); + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (UserException e) { + throw e; + } catch (BrainException e) { + throw e; + } catch (Exception e) { + throw new BrainException(e); 
+ } + + } + + public void setXAttrs(MRCRequest request, String path, Map xAttrs) + throws UserException, BrainException { + + try { + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), + request.syncPseudoRequest ? '*' : 'w'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath()); + + if (!request.details.authorized) { + // check whether the parent directory of the file grants search + // access + if (p.getLastPathComponent().length() != 0) + faMan + .checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, + request.details.groupIds); + } + + AbstractFileEntity file = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (file == null) + throw new UserException(ErrNo.ENOENT, "file or directory '" + + p.getLastPathComponent() + "' does not exist"); + + // set all system attributes included in the map + for (String attrKey : new HashSet(xAttrs.keySet())) { + + if (attrKey.startsWith("xtreemfs.")) { + + // check whether the user has privileged permissions to set + // system attributes + faMan + .checkPrivilegedPermissions(volume.getId(), file.getId(), + request.details.userId, request.details.superUser, + request.details.groupIds); + + BrainHelper.setSysAttrValue(sMan, sliceMan, volume, file, attrKey.substring(9), xAttrs + .get(attrKey).toString()); + + xAttrs.remove(attrKey); + } + } + + sMan.addXAttributes(file.getId(), xAttrs); + + // update POSIX timestamps + sMan.updateFileTimes(file.getId(), false, true, false); + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + 
public void removeXAttrs(MRCRequest request, String path, List keys) + throws UserException, BrainException { + + try { + + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), + request.syncPseudoRequest ? '*' : 'w'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath()); + + if (!request.details.authorized) { + // check whether the parent directory of the file grants search + // access + if (p.getLastPathComponent().length() != 0) + faMan + .checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, + request.details.groupIds); + } + + AbstractFileEntity file = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (file == null) + throw new UserException(ErrNo.ENOENT, "file or directory '" + + p.getLastPathComponent() + "' does not exist"); + + sMan.deleteXAttributes(file.getId(), keys); + + // update POSIX timestamps + sMan.updateFileTimes(file.getId(), false, true, false); + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void addReplica(MRCRequest request, String globalFileId, + Map stripingPolicy, List osdList) throws BrainException, + UserException { + + try { + + StringTokenizer st = new StringTokenizer(globalFileId, ":"); + if (st.countTokens() != 2) + throw new BrainException( + "invalid global file ID - needs to be as follows: \"volumeIdAsString\":\"fileIdInVolumeAsNumber\""); + String volumeId = st.nextToken(); + long fileId = -1; + try { + fileId = Long.parseLong(st.nextToken()); + } catch (NumberFormatException exc) { + throw new BrainException( + 
"invalid global file ID - needs to be as follows: \"volumeIdAsString\":\"fileIdInVolumeAsNumber\""); + } + + StorageManager sMan = sliceMan.getSliceDB(volumeId, fileId, + request.syncPseudoRequest ? '*' : 'w'); + + // check whether a striping policy is explicitly assigned to the + // replica; if not, use the one from the file; if none is assigned + // to the file either, use the one from the volume; if the volume + // does not have a default striping policy, throw an exception + StripingPolicy sPol = Converter.mapToStripingPolicy(stripingPolicy); + if (sPol == null) + sPol = sMan.getStripingPolicy(fileId); + if (sPol == null) + sPol = sMan.getVolumeStripingPolicy(); + if (sPol == null) + throw new UserException(ErrNo.EPERM, + "either the replica, the file or the volume need a striping policy"); + + AbstractFileEntity entity = sMan.getFileEntity(fileId); + if (!(entity instanceof FileEntity)) + throw new UserException(ErrNo.EPERM, "replicas may only be added to files"); + + // if the file refers to a symbolic link, resolve the link + String target = sMan.getFileReference(fileId); + if (target != null) { + String path = target; + Path p = new Path(path); + + // if the local MRC is not responsible, send a redirect + if (!sliceMan.hasVolume(p.getVolumeName())) { + MessageUtils.setRedirect(request, target); + this.notifyRequestListener(request); + return; + } + + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), 'r'); + + fileId = sMan.getFileEntity(p.getPathWithoutVolume()).getId(); + } + + if (!sMan.isReadOnly(fileId)) + throw new UserException(ErrNo.EPERM, + "the file has to be made read-only before adding replicas"); + + // check whether the new replica relies on a set of OSDs which + // hasn't been used yet + XLocationsList xLocList = sMan.getXLocationsList(fileId); + + if (!BrainHelper.isAddable(xLocList, osdList)) + throw new UserException( + "at least one OSD already used in 
current X-Locations list '" + + JSONParser.writeJSON(Converter.xLocListToList(xLocList)) + "'"); + + // create a new replica and add it to the client's X-Locations list + // (this will automatically increment the X-Locations list version) + XLocation replica = new XLocation(sPol, osdList.toArray(new String[osdList.size()])); + if (xLocList == null) + xLocList = new XLocationsList(new XLocation[] { replica }, 0); + else + xLocList.addReplica(replica); + sMan.setXLocationsList(fileId, xLocList); + + // update POSIX timestamps + sMan.updateFileTimes(fileId, false, true, false); + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void removeReplica(MRCRequest request, String globalFileId, + Map stripingPolicy, List osdList) throws BrainException, + UserException { + + try { + + StringTokenizer st = new StringTokenizer(globalFileId, ":"); + if (st.countTokens() != 2) + throw new BrainException( + "invalid global file ID - needs to look like this: \"volumeIdAsString\":\"fileIdInVolumeAsNumber\""); + String volumeId = st.nextToken(); + long fileId = -1; + try { + fileId = Long.parseLong(st.nextToken()); + } catch (NumberFormatException exc) { + throw new BrainException( + "invalid global file ID - needs to be as follows: \"volumeIdAsString\":\"fileIdInVolumeAsNumber\""); + } + + StorageManager sMan = sliceMan.getSliceDB(volumeId, fileId, + request.syncPseudoRequest ? 
'*' : 'w'); + + // build a replica descriptor from the given striping policy and OSD list + XLocation repl = new XLocation(Converter.mapToStripingPolicy(stripingPolicy), osdList + .toArray(new String[osdList.size()])); + + // search for the replica in the X-Locations list + XLocationsList xLocList = sMan.getXLocationsList(fileId); + + // NOTE(review): if the file has no X-Locations list, the removal is silently a no-op + if (xLocList != null) { + + xLocList.removeReplica(repl); + + sMan.setXLocationsList(fileId, xLocList); + + // update POSIX timestamps + sMan.updateFileTimes(fileId, false, true, false); + } + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + // rethrow as-is instead of wrapping a BrainException in another BrainException, + // consistent with removeXAttrs/setXAttrs + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void setReplicas(MRCRequest request, String globalFileId, List> replicas) + throws BrainException, UserException { + + try { + + StringTokenizer st = new StringTokenizer(globalFileId, ":"); + if (st.countTokens() != 2) + throw new BrainException( + "invalid global file ID - needs to look like this: \"volumeIdAsString\":\"fileIdInVolumeAsNumber\""); + String volumeId = st.nextToken(); + long fileId = -1; + try { + fileId = Long.parseLong(st.nextToken()); + } catch (NumberFormatException exc) { + throw new BrainException( + "invalid global file ID - needs to be as follows: \"volumeIdAsString\":\"fileIdInVolumeAsNumber\""); + } + + StorageManager sMan = sliceMan.getSliceDB(volumeId, fileId, + request.syncPseudoRequest ? 
'*' : 'w'); + + // create an array of X-Locations from the given 'replicas' argument + XLocation[] newRepls = null; + if (replicas != null) { + newRepls = new XLocation[replicas.size()]; + + for (int i = 0; i < replicas.size(); i++) { + + Map spol = (Map) replicas.get(i).get(0); + // element 1 holds the OSD list (element 0 is the striping policy map above) + List osds = (List) replicas.get(i).get(1); + + newRepls[i] = new XLocation(Converter.mapToStripingPolicy(spol), osds + .toArray(new String[osds.size()])); + } + } + + // create the new X-Locations list from the given array + XLocationsList xLocList = sMan.getXLocationsList(fileId); + if (newRepls != null) { + + if (xLocList == null) + xLocList = new XLocationsList(newRepls, 0); + else + xLocList = new XLocationsList(newRepls, xLocList.getVersion() + 1); + } + + else if (xLocList != null) + xLocList = new XLocationsList(null, xLocList.getVersion() + 1); + + // persist the newly built list (was 'null', which discarded the computation + // above and wiped the file's X-Locations list) + sMan.setXLocationsList(fileId, xLocList); + + // update POSIX timestamps + sMan.updateFileTimes(fileId, false, true, false); + + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (BrainException exc) { + // preserve the "invalid global file ID" BrainExceptions thrown above + // instead of wrapping them in another BrainException + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void checkAccess(MRCRequest request, String path, String mode) throws BrainException, + UserException { + + try { + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan + .getSliceDB(volume.getId(), p.getPathWithoutVolume(), 'r'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath(), true); + + // check whether the parent directory is searchable + if (p.getLastPathComponent().length() != 0) + faMan.checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + + AbstractFileEntity file = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if 
(file == null) + throw new UserException(ErrNo.ENOENT, "file or directory '" + + p.getLastPathComponent() + "' does not exist"); + + boolean success = false; + try { + for (int i = 0; i < mode.length(); i++) + faMan.checkPermission(mode.substring(i, i + 1), volume.getId(), file.getId(), + parentDir.getId(), request.details.userId, request.details.superUser, + request.details.groupIds); + success = true; + } catch (UserException exc) { + // permission denied + } + + MessageUtils.marshallResponse(request, success); + this.notifyRequestListener(request); + + } catch (UserException exc) { + throw exc; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void initFileSystem(MRCRequest request) throws BrainException { + + try { + + if (!request.details.superUser) + throw new UserException(ErrNo.EPERM, + "only superusers can initialize the file system"); + + // deregister all volumes + List volIDs = new LinkedList(); + for (VolumeInfo volume : sliceMan.getVolumes()) + volIDs.add(volume.getId()); + + request.details.context = new HashMap(); + request.details.context.put("volIDs", volIDs); + + // reset the partition manager + sliceMan.reset(); + + initFileSystemStep2(request); + + } catch (Exception e) { + throw new BrainException(e); + } + } + + public void initFileSystemStep2(MRCRequest request) throws BrainException { + + if (request.sr != null) + try { + BrainHelper.parseResponse(request.sr); + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_WARN, this, exc); + } + + List volIDs = (List) request.details.context.get("volIDs"); + + // if no more volumes need to be deregistered, send the response to + // the + // client + if (volIDs.size() == 0) { + MessageUtils.marshallResponse(request, null); + this.notifyRequestListener(request); + } + + // otherwise, deregister the next volume + else { + + request.details.context.put("nextMethod", "initFileSystemStep2"); + + String nextVolId = volIDs.remove(0); + List args = new LinkedList(); + 
args.add(nextVolId); + + BrainHelper.submitRequest(this, request, dirService, "deregisterEntity", args, + authString); + } + + } + + // Runs 'queryString' against the slice database responsible for 'path' and + // marshals the query result back to the client. + // NOTE(review): no permission check is performed yet (see TODO below). + public void query(MRCRequest request, String path, String queryString) throws UserException, + BrainException { + + try { + + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan + .getSliceDB(volume.getId(), p.getPathWithoutVolume(), 'r'); + + // TODO: access control + + MessageUtils.marshallResponse(request, sMan.submitQuery(p.getPathWithoutVolume(), + queryString)); + this.notifyRequestListener(request); + + } catch (UserException e) { + throw e; + } catch (Exception e) { + throw new BrainException(e); + } + } + + // Sums the on-disk database file sizes of all slices. + public long getTotalDBSize() throws BrainException { + + try { + long size = 0; + for (SliceID slice : sliceMan.getSliceList()) + size += sliceMan.getSliceDB(slice, 'r').getDBFileSize(); + + return size; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + // Counts the files stored across all slice databases. + public long getTotalNumberOfFiles() throws BrainException { + + try { + long count = 0; + for (SliceID slice : sliceMan.getSliceList()) + count += sliceMan.getSliceDB(slice, 'r').getNumberOfFiles(); + + return count; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + // Counts the directories stored across all slice databases. + public long getTotalNumberOfDirs() throws BrainException { + + try { + long count = 0; + for (SliceID slice : sliceMan.getSliceList()) + count += sliceMan.getSliceDB(slice, 'r').getNumberOfDirs(); + + return count; + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + // Closes all slice databases; called on service shutdown. + public void shutdown() throws BrainException { + + try { + sliceMan.closeSliceDBs(); + } catch (Exception e) { + throw new BrainException(e); + } + } + + // Registers the listener notified when a brain request completes; + // notifyRequestListener throws a RuntimeException if it is still null. + public void setRequestListener(BrainRequestListener listener) { + requestListener = listener; + } + + // Returns a map of volume ID -> volume name for all locally managed volumes. + public void getLocalVolumes(MRCRequest request) throws BrainException { + + try { + List volumes = sliceMan.getVolumes(); + + Map map = new HashMap(); + for 
(VolumeInfo data : volumes) + map.put(data.getId(), data.getName()); + + MessageUtils.marshallResponse(request, map); + this.notifyRequestListener(request); + + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void checkpointDB() throws BrainException { + try { + sliceMan.compactDB(); + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void completeDBCheckpoint() throws BrainException { + try { + sliceMan.completeDBCompaction(); + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void restoreDB() throws BrainException { + try { + sliceMan.restoreDB(); + } catch (Exception exc) { + throw new BrainException(exc); + } + } + + public void dumpDB(String dumpFilePath) throws Exception { + sliceMan.dumpDB(dumpFilePath); + } + + public void restoreDBFromDump(String dumpFilePath) throws Exception { + sliceMan.restoreDBFromDump(dumpFilePath); + } + + // public void createFileTree(MRCRequest request, List treeData, + // String targetPath) throws BrainException, UserException { + // + // try { + // + // HTTPHeaders xCapHeaders = createSubtree(request, treeData, + // targetPath); + // + // Path path = new Path(targetPath); + // VolumeInfo volume = sliceMan.getVolumeByName(path.getVolumeName()); + // StorageManager sMan = sliceMan.getSliceDB(volume.getId(), path + // .getPathWithoutVolume(), request.syncPseudoRequest ? 
'*' + // : 'w'); + // + // request.details.sliceId = sMan.getSliceId(); + // request.details.persistentOperation = true; + // + // MessageUtils.marshallResponse(request, null, xCapHeaders); + // this.notifyRequestListener(request); + // + // } catch (UserException exc) { + // throw exc; + // } catch (Exception exc) { + // throw new BrainException(exc); + // } + // + // } + // + // public void replayMove(MRCRequest request, String sourcePath, + // String targetPath) throws UserException, BrainException { + // + // try { + // Path sPath = new Path(sourcePath); + // Path tPath = new Path(targetPath); + // + // VolumeInfo sVolume = sliceMan + // .getVolumeByName(sPath.getVolumeName()); + // StorageManager sMan = sliceMan.getSliceDB(sVolume.getId(), sPath + // .getPathWithoutVolume(), '*'); + // + // // if a remote move took place, only replay the local part of the + // // operation ... + // if (!sliceMan.hasVolume(tPath.getVolumeName())) { + // + // AbstractFileEntity source = sMan.getFileEntity(sPath + // .getPathWithoutVolume()); + // + // // delete the file/directory from the local volume + // deleteRecursively(sMan, source.getId()); + // + // MessageUtils.marshallResponse(request, null); + // this.notifyRequestListener(request); + // } + // + // // ... 
otherwise, completely replay the operation locally + // else + // move(request, sourcePath, targetPath); + // + // } catch (UserException exc) { + // throw exc; + // } catch (BrainException exc) { + // throw exc; + // } catch (Exception exc) { + // throw new BrainException(exc); + // } + // + // } + + // public void interVolumeMove(MRCRequest request, StorageManager sMan, + // AbstractFileEntity source, Path targetPath, boolean executeLocally) + // throws UserException, BrainException { + // + // request.details.context = new HashMap(); + // request.details.context.put("sMan", sMan); + // request.details.context.put("source", source); + // request.details.context.put("targetPath", targetPath); + // request.details.context.put("executeLocally", executeLocally); + // + // if (executeLocally) + // // if the volume resides on the local MRC, directly proceed with the + // // next step + // interVolumeMoveStep2(request); + // + // else { + // // otherwise, retrieve the target host for the volume from the + // // Directory Service + // + // Map queryMap = new HashMap(); + // queryMap.put("name", targetPath.getVolumeName()); + // queryMap.put("type", "volume"); + // + // List attrs = new LinkedList(); + // attrs.add("mrcURL"); + // + // List args = new LinkedList(); + // args.add(queryMap); + // args.add(attrs); + // + // request.details.context.put("nextMethod", "interVolumeMoveStep2"); + // BrainHelper.submitRequest(this, request, dirService, "getEntities", + // args, "nullauth " + url); + // } + // } + // + // public void interVolumeMoveStep2(MRCRequest request) throws + // UserException, + // BrainException { + // + // try { + // + // Path targetPath = (Path) request.details.context.get("targetPath"); + // StorageManager sMan = (StorageManager) + // request.details.context.get("sMan"); + // AbstractFileEntity source = (AbstractFileEntity) request.details.context + // .get("source"); + // + // if ((Boolean) request.details.context.get("executeLocally")) { + // + // // 
if the target volume resides on the local MRC, create the + // // target file tree locally + // createFileTree(request, Converter.fileTreeToList(sMan, source), + // targetPath.toString()); + // + // } else { + // // otherwise, create the target file tree remotely + // + // request.details.context.put("nextMethod", "interVolumeMoveStep3"); + // + // Map> response = (Map>) BrainHelper + // .parseResponse(request.sr); + // + // if (response.size() == 0) + // throw new UserException(ErrNo.ENOENT, "volume '" + // + targetPath.getVolumeName() + "' unknown"); + // + // assert (response.size() == 1); + // + // // get the MRC holding the volume from the query result + // String targetEndpoint = (String) response.get( + // response.keySet().iterator().next()).get("mrcURL"); + // + // // serialize the entire subtree with all dependencies + // List subTree = Converter.fileTreeToList(sMan, source); + // + // List params = new LinkedList(); + // params.add(subTree); + // params.add(targetPath.toString()); + // + // final InetSocketAddress targetMRCURL = new InetSocketAddress( + // targetEndpoint.substring(0, targetEndpoint.indexOf(':')), + // Integer.parseInt(targetEndpoint.substring(targetEndpoint + // .indexOf(':') + 1))); + // + // // create the file tree on the remote host + // BrainHelper.submitRequest(this, request, targetMRCURL, + // "createFileTree", params); + // } + // + // } catch (UserException exc) { + // throw exc; + // } catch (BrainException exc) { + // throw exc; + // } catch (Exception exc) { + // throw new BrainException(exc); + // } + // } + // + // public void interVolumeMoveStep3(MRCRequest request) throws + // BrainException { + // + // try { + // + // // check whether an exception has occured + // Object response = BrainHelper.parseResponse(request.sr); + // if (response != null) { + // MessageUtils.marshallException(request, + // (Map) response, false); + // this.notifyRequestListener(request); + // return; + // } + // + // // if a capability for target 
file deletion has been issued, return + // // it + // HTTPHeaders xCapHeaders = null; + // if (request.sr.responseHeaders + // .getHeader(HTTPHeaders.HDR_XCAPABILITY) != null + // && request.sr.responseHeaders + // .getHeader(HTTPHeaders.HDR_XLOCATIONS) != null) { + // xCapHeaders = new HTTPHeaders(); + // xCapHeaders.addHeader(HTTPHeaders.HDR_XCAPABILITY, + // request.sr.responseHeaders + // .getHeader(HTTPHeaders.HDR_XCAPABILITY)); + // xCapHeaders.addHeader(HTTPHeaders.HDR_XLOCATIONS, + // request.sr.responseHeaders + // .getHeader(HTTPHeaders.HDR_XLOCATIONS)); + // } + // + // StorageManager sMan = (StorageManager) + // request.details.context.get("sMan"); + // AbstractFileEntity source = (AbstractFileEntity) request.details.context + // .get("source"); + // + // // delete the file/directory from the local volume + // deleteRecursively(sMan, source.getId()); + // + // MessageUtils.marshallResponse(request, null, xCapHeaders); + // this.notifyRequestListener(request); + // + // } catch (Exception exc) { + // throw new BrainException(exc); + // } + // } + // + // protected void deleteRecursively(StorageManager sMan, long fileId, + // String name, long parentId) throws BackendException { + // + // Map children = sMan.getChildData(fileId); + // for (String fileName : children.keySet()) { + // long childId = children.get(fileName).getId(); + // deleteRecursively(sMan, childId, fileName, fileId); + // } + // + // sMan.unlinkFile(name, fileId, parentId); + // } + // + // protected HTTPHeaders createSubtree(MRCRequest request, + // List treeData, String targetPath) throws UserException, + // BackendException, IOException, BrainException, JSONException { + // + // Path path = new Path(targetPath); + // VolumeInfo volume = sliceMan.getVolumeByName(path.getVolumeName()); + // StorageManager sMan = sliceMan.getSliceDB(volume.getId(), path + // .getPathWithoutVolume(), 'w'); + // long targetParentId = sMan.getFileEntity(path.getInnerPath(), true) + // .getId(); + // 
AbstractFileEntity tChild = sMan.getChild(path.getLastPathComponent(), + // targetParentId); + // + // long childId = 0; + // if (tChild != null) + // childId = tChild.getId(); + // + // // unwrap the tree data + // AbstractFileEntity file = Converter + // .mapToFile((Map) treeData.get(0)); + // int sourceType = file.isDirectory() ? FILETYPE_DIR : FILETYPE_FILE; + // + // List attributes = Converter + // .attrMapsToAttrList((List>) treeData.get(1)); + // List> subElements = (List>) treeData.get(2); + // + // int targetType = path.getPathWithoutVolume().length() == 0 ? FILETYPE_DIR + // : tChild == null ? FILETYPE_NOTEXIST + // : tChild.isDirectory() ? FILETYPE_DIR : FILETYPE_FILE; + // + // // peform the movement operation + // + // HTTPHeaders xCapHeaders = null; + // + // // if the source is a directory + // if (sourceType == FILETYPE_DIR) { + // + // switch (targetType) { + // + // case FILETYPE_NOTEXIST: // target does not exist + // { + // // recursively cross-volume-move the source directory tree + // // to the remote volume and link it to the parent directory + // // of the target path + // + // file.setName(path.getLastPathComponent()); + // file.setParentId(targetParentId); + // + // // create a new file on the target storage manager with the + // // data + // sMan.createFile(file, attributes); + // + // for (List subElement : subElements) + // createSubtree(request, subElement, targetPath + "/" + // + ((Map) subElement.get(0)).get("name")); + // + // break; + // } + // + // case FILETYPE_DIR: // target is a directory + // { + // + // // chech whether the target directory may be deleted; if not, + // // throw an exception + // + // if (!request.details.authorized) { + // + // // check whether the target directory may be overwritten + // faMan.checkPermission(FileAccessManager.DELETE_ACCESS, + // volume.getId(), tChild.getId(), request.details.userId, + // request.details.groupIds); + // } + // + // if (sMan.hasChildren(tChild.getId())) + // throw new 
UserException(ErrNo.ENOTEMPTY, + // "target directory '" + targetPath + "' is not empty"); + // + // sMan.deleteFile(childId); + // + // // recursively cross-volume-move the source directory tree + // // to the remote volume and link it to the parent directory + // file.setName(path.getLastPathComponent()); + // file.setParentId(targetParentId); + // + // // create a new file on the target storage manager with the + // // data + // sMan.createFile(file, attributes); + // + // for (List subElement : subElements) + // createSubtree(request, subElement, targetPath + "/" + // + ((Map) subElement.get(0)).get("name")); + // + // break; + // } + // + // case FILETYPE_FILE: // target is a file + // throw new UserException(ErrNo.ENOTDIR, + // "cannot rename directory '" + file.getName() + // + "' to file '" + targetPath + "'"); + // } + // + // } + // + // // if the source is a file + // else { + // + // switch (targetType) { + // + // case FILETYPE_NOTEXIST: // target does not exist + // { + // // create a new file on the remote volume and link it to the + // // parent directory of the target path + // + // file.setName(path.getLastPathComponent()); + // file.setParentId(targetParentId); + // + // break; + // } + // + // case FILETYPE_DIR: // target is a directory + // { + // throw new UserException(ErrNo.EISDIR, "cannot rename file '" + // + file.getName() + "' to directory '" + targetPath + "'"); + // } + // + // case FILETYPE_FILE: // target is a file + // { + // + // if (!request.details.authorized) { + // + // // obtain a deletion capability for the file + // String aMode = faMan.translateAccessMode(volume.getId(), + // FileAccessManager.DELETE_ACCESS); + // String capability = BrainHelper.createCapability(faMan, + // aMode, volume.getId(), tChild.getId(), request.details.userId, + // request.details.groupIds).toString(); + // + // // set the XCapability and XLocationsList headers + // xCapHeaders = BrainHelper.createXCapHeaders(capability, + // 
sMan.getXLocationsList(tChild.getId())); + // } + // + // // delete the target file, rename the source file and relink + // // it to the parent directory of the target path + // + // sMan.deleteFile(childId); + // + // file.setName(path.getLastPathComponent()); + // file.setParentId(targetParentId); + // + // break; + // } + // + // } + // + // // create a new file on the target storage manager with the data + // sMan.createFile(file, attributes); + // } + // + // return xCapHeaders; + // } + + protected void notifyRequestListener(MRCRequest request) { + if (!request.syncPseudoRequest) { + if (requestListener != null) + requestListener.brainRequestDone(request); + else + throw new RuntimeException("listener must not be null!"); + } + } + + protected VolumeInfo getVolumeData(String volumeId) throws UserException, BackendException { + return sliceMan.getVolumeById(volumeId); + } + + private void doSetACLEntries(MRCRequest request, String path, Map aclEntries, + boolean persistentOperation) throws UserException, BackendException, BrainException { + + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), + request.syncPseudoRequest ? 
'*' : 'w'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath()); + + if (!request.details.authorized) { + // check whether the parent directory of the file grants search + // access + if (p.getLastPathComponent().length() != 0) + faMan.checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + } + + AbstractFileEntity file = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (file == null) + throw new UserException(ErrNo.ENOENT, "file or directory '" + p.getLastPathComponent() + + "' does not exist"); + + if (!request.details.authorized) { + // check whether the access mode may be changed + faMan.checkPrivilegedPermissions(volume.getId(), file.getId(), request.details.userId, + request.details.superUser, request.details.groupIds); + } + + // if the file refers to a symbolic link, resolve the link + String target = sMan.getFileReference(file.getId()); + if (target != null) { + path = target; + p = new Path(path); + + // if the local MRC is not responsible, send a redirect + if (!sliceMan.hasVolume(p.getVolumeName())) { + MessageUtils.setRedirect(request, target); + this.notifyRequestListener(request); + return; + } + + volume = sliceMan.getVolumeByName(p.getVolumeName()); + sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), 'r'); + + file = sMan.getFileEntity(p.getPathWithoutVolume()); + } + + // change the ACL + faMan.setACLEntries(volume.getId(), file.getId(), request.details.userId, + request.details.groupIds, aclEntries); + + // update POSIX timestamps + sMan.updateFileTimes(file.getId(), false, true, false); + + if (persistentOperation) { + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + } + } + + private void doSetStripingPolicy(MRCRequest request, String path, + Map stripingPolicy, boolean persistentOperation) throws UserException, + BackendException, BrainException { + + Path p = new 
Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), + request.syncPseudoRequest ? '*' : 'w'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath(), true); + + if (!request.details.authorized) { + // check whether the parent directory of the file grants search + // access + if (p.getLastPathComponent().length() != 0) + faMan.checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, request.details.groupIds); + } + + AbstractFileEntity dir = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (dir == null) + throw new UserException(ErrNo.ENOENT, "directory '" + p.getLastPathComponent() + + "' does not exist"); + + // if the directory refers to a symbolic link, resolve the link + String target = sMan.getFileReference(dir.getId()); + if (target != null) { + path = target; + p = new Path(path); + + // if the local MRC is not responsible, send a redirect + if (!sliceMan.hasVolume(p.getVolumeName())) { + MessageUtils.setRedirect(request, target); + this.notifyRequestListener(request); + return; + } + + volume = sliceMan.getVolumeByName(p.getVolumeName()); + sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), 'r'); + + dir = sMan.getFileEntity(p.getPathWithoutVolume()); + } + + if (!dir.isDirectory()) + throw new UserException(ErrNo.ENOTDIR, + "default striping policies are restricted to directories and volumes"); + + sMan.setStripingPolicy(dir.getId(), stripingPolicy); + + // update POSIX timestamps of parent directory + sMan.updateFileTimes(dir.getId(), false, true, false); + + if (persistentOperation) { + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + } + } + + public void doChangeOwner(MRCRequest request, String path, String userId, String groupId, + boolean persistentOperation) throws UserException, 
BackendException, BrainException { + + try { + + Path p = new Path(path); + VolumeInfo volume = sliceMan.getVolumeByName(p.getVolumeName()); + StorageManager sMan = sliceMan.getSliceDB(volume.getId(), p.getPathWithoutVolume(), + request.syncPseudoRequest ? '*' : 'w'); + + AbstractFileEntity parentDir = sMan.getFileEntity(p.getInnerPath()); + + if (!request.details.authorized) { + // check whether the parent directory of the file grants search + // access + if (p.getLastPathComponent().length() != 0) + faMan + .checkSearchPermission(volume.getId(), p.getInnerPath(), + request.details.userId, request.details.superUser, + request.details.groupIds); + } + + AbstractFileEntity file = sMan.getChild(p.getLastPathComponent(), parentDir.getId()); + if (file == null) + throw new UserException(ErrNo.ENOENT, "file or directory '" + + p.getLastPathComponent() + "' does not exist"); + + if (!request.details.authorized) { + // check whether the owner may be changed + faMan.checkPrivilegedPermissions(volume.getId(), file.getId(), + request.details.userId, request.details.superUser, request.details.groupIds); + } + + if (groupId != null) + sMan.setFileGroup(file.getId(), groupId); + if (userId != null) + sMan.setFileOwner(file.getId(), userId); + + // update POSIX timestamps + sMan.updateFileTimes(file.getId(), false, true, false); + + if (persistentOperation) { + request.details.sliceId = sMan.getSliceId(); + request.details.persistentOperation = true; + } + + } catch (UserException e) { + throw e; + } catch (BrainException e) { + throw e; + } catch (Exception e) { + throw new BrainException(e); + } + } +} diff --git a/servers/src/org/xtreemfs/mrc/brain/BrainException.java b/servers/src/org/xtreemfs/mrc/brain/BrainException.java new file mode 100644 index 0000000000000000000000000000000000000000..e60f535e9fa6243eec6ea7621ac5e4ee223a7196 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/BrainException.java @@ -0,0 +1,42 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer 
/**
 * Generic exception type signalling an internal (non-user) error in the MRC
 * brain while a request was being processed.
 */
public class BrainException extends Exception {

    /**
     * Creates an exception that wraps an underlying cause.
     *
     * @param cause the exception that triggered this error
     */
    public BrainException(Exception cause) {
        super(cause);
    }

    /**
     * Creates an exception with a descriptive message.
     *
     * @param message a description of the error
     */
    public BrainException(String message) {
        super(message);
    }

    /**
     * Creates an exception with a descriptive message and an underlying cause.
     *
     * @param message a description of the error
     * @param cause the exception that triggered this error
     */
    public BrainException(String message, Exception cause) {
        super(message, cause);
    }

}
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.brain; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.rmi.RemoteException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.StringTokenizer; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.uuids.UnknownUUIDException; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.foundation.speedy.SpeedyRequest.RequestStatus; +import org.xtreemfs.mrc.MRCConfig; +import org.xtreemfs.mrc.MRCRequest; +import org.xtreemfs.mrc.ac.FileAccessManager; +import org.xtreemfs.mrc.brain.Brain.SysAttrs; +import org.xtreemfs.mrc.brain.storage.BackendException; +import org.xtreemfs.mrc.brain.storage.StorageManager; +import org.xtreemfs.mrc.brain.storage.entities.ACLEntry; +import org.xtreemfs.mrc.brain.storage.entities.AbstractFileEntity; +import org.xtreemfs.mrc.brain.storage.entities.DirEntity; +import org.xtreemfs.mrc.brain.storage.entities.FileEntity; 
+import org.xtreemfs.mrc.brain.storage.entities.StripingPolicy; +import org.xtreemfs.mrc.brain.storage.entities.XLocation; +import org.xtreemfs.mrc.brain.storage.entities.XLocationsList; +import org.xtreemfs.mrc.osdselection.OSDStatusManager; +import org.xtreemfs.mrc.slices.SliceManager; +import org.xtreemfs.mrc.slices.VolumeInfo; +import org.xtreemfs.mrc.utils.Converter; +import org.xtreemfs.mrc.utils.MessageUtils; + +/** + * A helper class used by the backend. + * + * @author stender + * + */ +public class BrainHelper { + + /** + * Creates a map containing information about a volume, as required by the + * directory service when registering a new volume. + * + * @param vol + * @param osdMan + * @param mrcUUID + * @return a map containing volume information + */ + public static Map createDSVolumeInfo(VolumeInfo vol, OSDStatusManager osdMan, + String mrcUUID) { + + String free = String.valueOf(osdMan.getFreeSpace(vol)); + + Map map = new HashMap(); + map.put("name", vol.getName()); + map.put("mrc", mrcUUID); + map.put("type", "volume"); + map.put("free", free); + + return map; + } + + /** + * Creates a capability for accessing a file. + * + * @param accessMode + * @param volumeId + * @param fileId + * @param epochNo + * @param sharedSecret + * @throws UserException + * @throws BrainException + */ + public static Capability createCapability(String accessMode, String volumeId, long fileId, + long epochNo, String sharedSecret) throws UserException, BrainException { + + return new Capability(volumeId + ":" + fileId, accessMode, epochNo, sharedSecret); + } + + /** + * Creates an HTTP headers object containing an X-Capability and X-Locations + * list entry. 
    /**
     * Creates a new X-Locations list for a file.
     *
     * The striping policy is resolved with a three-step fallback: the file's
     * own policy, then the parent directory's default, then the volume
     * default. The replica's OSDs are chosen by the volume's OSD selection
     * policy from the set of currently usable OSDs.
     *
     * @param xLocList an existing X-Locations list containing a version number
     *            (may be null)
     * @param sMan the Storage Manager responsible for the file
     * @param osdMan the OSD Status Manager that periodically reports a list of
     *            usable OSDs
     * @param path the path to the file
     * @param fileId the file ID
     * @param parentDirId the ID of the parent directory
     * @param volume information about the volume of the file
     * @param clientAddress the client's address, passed to the OSD selection
     *            policy (e.g. for proximity-based selection — TODO confirm)
     * @return an X-Locations list
     * @throws UserException if no striping policy can be determined
     * @throws BackendException if the storage backend fails
     * @throws BrainException if no usable OSDs are available
     */
    public static XLocationsList createXLocList(XLocationsList xLocList, StorageManager sMan,
        OSDStatusManager osdMan, Path path, long fileId, long parentDirId, VolumeInfo volume,
        InetSocketAddress clientAddress) throws UserException, BackendException, BrainException {

        // start a fresh (empty) list but carry over the previous version number
        xLocList = new XLocationsList(new XLocation[0], xLocList != null ? xLocList.getVersion()
            : 0);

        // first, try to get the striping policy from the file itself
        StripingPolicy stripingPolicy = sMan.getStripingPolicy(fileId);

        // if no such policy exists, try to retrieve it from the parent
        // directory
        if (stripingPolicy == null)
            stripingPolicy = sMan.getStripingPolicy(parentDirId);

        // if the parent directory has no default policy, take the one
        // associated with the volume
        if (stripingPolicy == null)
            stripingPolicy = sMan.getVolumeStripingPolicy();

        if (stripingPolicy == null)
            throw new UserException("could not open file " + path
                + ": no default striping policy available");

        // NOTE(review): generic type parameters were lost in extraction here;
        // presumably a map from OSD UUID to OSD status attributes — confirm
        Map osdMaps = (Map) osdMan.getUsableOSDs(volume.getId());

        if (osdMaps == null || osdMaps.size() == 0)
            throw new BrainException("could not open file " + path + ": no feasible OSDs available");

        // determine the actual striping width; if not enough OSDs are
        // available, the width will be limited to the amount of
        // available OSDs
        int width = Math.min((int) stripingPolicy.getWidth(), osdMaps.size());
        stripingPolicy.setWidth(width);

        // add the OSDs to the X-Locations list, according to the OSD
        // selection policy
        String[] osds = osdMan.getOSDSelectionPolicy(volume.getOsdPolicyId()).getOSDsForNewFile(
            osdMaps, clientAddress.getAddress(), width, volume.getOsdPolicyArgs());

        XLocation xLoc = new XLocation(stripingPolicy, osds);
        xLocList.addReplica(xLoc);

        return xLocList;
    }
    /**
     * Marshals the given arguments as JSON and attaches a Speedy request for
     * the given command/endpoint to the MRC request, then notifies the brain's
     * request listener so the request gets sent.
     *
     * If marshalling or setup fails, the error is marshalled into the request
     * as a BrainException and the listener is notified anyway, so the caller
     * always receives a completion callback.
     *
     * @param brain the brain whose request listener is notified
     * @param req the MRC request to attach the outgoing Speedy request to
     * @param endpoint the remote endpoint to send the request to
     * @param cmd the command (request URI) to invoke remotely
     * @param args the arguments, serialized to JSON as the request body
     * @param authString the authorization header to forward; if null, the
     *            original client's Authorization header is reused
     */
    public static void submitRequest(Brain brain, MRCRequest req, InetSocketAddress endpoint,
        String cmd, Object args, String authString) {

        try {
            // forward the caller's credentials unless explicit ones were given
            if (authString == null)
                authString = req.getPinkyRequest().requestHeaders
                    .getHeader(HTTPHeaders.HDR_AUTHORIZATION);

            ReusableBuffer body = ReusableBuffer.wrap(JSONParser.writeJSON(args).getBytes(
                HTTPUtils.ENC_UTF8));
            req.sr = new SpeedyRequest("GET", cmd, null, authString, body, HTTPUtils.DATA_TYPE.JSON);
            req.srEndpoint = endpoint;
            brain.notifyRequestListener(req);

        } catch (Exception exc) {
            // report the failure through the normal completion path
            MessageUtils.marshallException(req, new BrainException("could not send request to '"
                + endpoint + "'", exc));
            brain.notifyRequestListener(req);
        }
    }
null : JSONParser.parseJSON(new JSONString(body)); + } + } + + } finally { + theRequest.freeBuffer(); + } + + return null; + } + + public static void updateFileSize(StorageManager sMan, boolean updateATime, String volumeId, + long fileId, String newFileSizeString) throws UserException, BackendException { + + try { + + FileEntity fileEntity = (FileEntity) sMan.getFileEntity(fileId); + if (fileEntity == null) + throw new UserException(ErrNo.ENOENT, "file '" + fileId + "' does not exist"); + + int index = newFileSizeString.indexOf(','); + if (index == -1) + throw new UserException(ErrNo.EINVAL, "invalid " + HTTPHeaders.HDR_XNEWFILESIZE + + " header"); + + // parse the file size and epoch number + long newFileSize = Long.parseLong(newFileSizeString.substring(1, index)); + long epochNo = Long.parseLong(newFileSizeString.substring(index + 1, newFileSizeString + .length() - 1)); + + // discard outdated file size updates + if (epochNo < fileEntity.getEpoch()) + return; + + // accept any file size in a new epoch but only larger file sizes in + // the current epoch + if (epochNo > fileEntity.getEpoch() || newFileSize > fileEntity.getSize()) { + + sMan.setFileSize(fileId, newFileSize, epochNo, fileEntity.getIssuedEpoch()); + + // update POSIX timestamps + sMan.updateFileTimes(fileId, updateATime, false, true); + } + + } catch (NumberFormatException exc) { + throw new UserException("invalid file size: " + newFileSizeString); + } catch (ClassCastException exc) { + throw new UserException("file ID " + fileId + " refers to a directory"); + } + } + + public static String getSysAttrValue(MRCConfig config, StorageManager sMan, + OSDStatusManager osdMan, VolumeInfo volume, Path p, AbstractFileEntity file, + String keyString) throws JSONException, BackendException, UnknownUUIDException { + + SysAttrs key = null; + try { + key = SysAttrs.valueOf(keyString); + } catch (IllegalArgumentException exc) { + // ignore, will be handled by the 'default' case + } + + switch (key) { + + case 
locations: + return file instanceof FileEntity ? JSONParser.writeJSON(Converter + .xLocListToList(((FileEntity) file).getXLocationsList())) : ""; + case file_id: + return volume.getId() + ":" + file.getId(); + case object_type: + String ref = sMan.getFileReference(file.getId()); + return ref != null ? "3" : file.isDirectory() ? "2" : "1"; + case url: + return "uuid:" + config.getUUID().toString() + "/" + p.getVolumeName() + "/" + + p.getPathWithoutVolume(); + case owner: + return file.getUserId(); + case group: + return file.getGroupId(); + case default_sp: + if (!(file instanceof DirEntity)) + return ""; + StripingPolicy sp = sMan.getStripingPolicy(file.getId()); + if (sp == null) + return ""; + return sp.toString(); + case ac_policy_id: + return file.getId() == 1 ? volume.getAcPolicyId() + "" : ""; + case osdsel_policy_id: + return file.getId() == 1 ? volume.getOsdPolicyId() + "" : ""; + case osdsel_policy_args: + return file.getId() == 1 ? (volume.getOsdPolicyArgs() == null ? "" : volume + .getOsdPolicyArgs()) : ""; + case read_only: + if (!(file instanceof FileEntity)) + return String.valueOf(false); + + return String.valueOf(sMan.isReadOnly(file.getId())); + case free_space: + return file.getId() == 1 ? String.valueOf(osdMan.getFreeSpace(volume)) : ""; + } + + return ""; + } + + public static void setSysAttrValue(StorageManager sMan, SliceManager sliceMan, + VolumeInfo volume, AbstractFileEntity file, String keyString, String value) + throws UserException, BackendException, IOException { + + SysAttrs key = null; + try { + key = SysAttrs.valueOf(keyString); + } catch (IllegalArgumentException exc) { + // ignore, will be handled by the 'default' case + } + + switch (key) { + + case locations: + + // explicitly setting X-Locations lists is only permitted for files + // that haven't yet been assigned an X-Locations list! 
+ if (((FileEntity) file).getXLocationsList() != null) + throw new UserException(ErrNo.EPERM, + "cannot set X-Locations: OSDs have been assigned already"); + + try { + // parse the X-Locations list, ensure that it is correctly + // formatted and consistent + XLocationsList newXLoc = Converter.listToXLocList((List) JSONParser + .parseJSON(new JSONString(value))); + + if (!BrainHelper.isConsistent(newXLoc)) + throw new UserException(ErrNo.EINVAL, "inconsistent X-Locations list:" + + "at least one OSD occurs more than once"); + + sMan.setXLocationsList(file.getId(), newXLoc); + + } catch (JSONException exc) { + throw new UserException(ErrNo.EINVAL, "invalid X-Locations-List: " + value); + } + + break; + + case default_sp: + + if (!file.isDirectory()) + throw new UserException(ErrNo.EPERM, + "default striping policies can only be set on volumes and directories"); + + try { + Map sp = null; + if (!value.equals("null")) { + StringTokenizer st = new StringTokenizer(value, ", \t"); + sp = new HashMap(); + sp.put("policy", st.nextToken()); + sp.put("stripe-size", Long.parseLong(st.nextToken())); + sp.put("width", Long.parseLong(st.nextToken())); + } + + if (file.getId() == 1 && sp == null) + throw new UserException(ErrNo.EPERM, + "cannot remove volume default striping policy"); + + sMan.setStripingPolicy(file.getId(), sp); + } catch (NumberFormatException exc) { + throw new UserException(ErrNo.EINVAL, "invalid default striping policy: " + value); + } + + break; + + case osdsel_policy_id: + + if (file.getId() != 1) + throw new UserException(ErrNo.EINVAL, + "OSD selection policies can only be set on volumes"); + + try { + long newPol = Long.parseLong(value); + volume.setOsdPolicyId(newPol); + sliceMan.notifyVolumeChangeListeners(VolumeChangeListener.MOD_CHANGED, volume); + + } catch (NumberFormatException exc) { + throw new UserException(ErrNo.EINVAL, "invalid OSD selection policy: " + value); + } + + break; + + case osdsel_policy_args: + + if (file.getId() != 1) + throw new 
UserException(ErrNo.EINVAL, + "OSD selection policies can only be set and configured on volumes"); + + volume.setOsdPolicyArgs(value); + sliceMan.notifyVolumeChangeListeners(VolumeChangeListener.MOD_CHANGED, volume); + + break; + + case read_only: + + if (!(file instanceof FileEntity)) + throw new UserException(ErrNo.EPERM, "only files can be made read-only"); + + boolean readOnly = Boolean.valueOf(value); + + FileEntity fileAsFile = (FileEntity) file; + if (!readOnly && fileAsFile.getXLocationsList().getReplicas().length > 1) + throw new UserException(ErrNo.EPERM, + "read-only flag cannot be removed from files with multiple replicas"); + + sMan.setReadOnly(file.getId(), readOnly); + + break; + + default: + throw new UserException(ErrNo.EINVAL, "system attribute '" + key + + "' unknown or immutable"); + } + } + + /** + * Checks whether the given replica (i.e. list of OSDs) can be added to the + * given X-Locations list without compromising consistency. + * + * @param xLocList + * the X-Locations list + * @param newOSDs + * the list of new OSDs to add + * @return true, if adding the OSD list is possible, false + * , otherwise + */ + public static boolean isAddable(XLocationsList xLocList, List newOSDs) { + if (xLocList != null) + for (XLocation loc : xLocList.getReplicas()) + for (String osd : loc.getOsdList()) + for (Object newOsd : newOSDs) + if (osd.equals(newOsd)) + return false; + return true; + } + + /** + * Checks whether the given X-Locations list is consistent. It is regarded + * as consistent if no OSD in any replica occurs more than once. 
/**
 * Listener interface for components that want to be notified once the MRC
 * brain has finished processing a request.
 */
public interface BrainRequestListener {

    /**
     * Invoked when the brain has completed processing of the given request
     * (successfully or with a marshalled error).
     *
     * @param request the request that has been processed
     */
    public void brainRequestDone(MRCRequest request);

}
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.brain; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.LinkedBlockingQueue; + +import org.xtreemfs.common.auth.AuthenticationException; +import org.xtreemfs.common.auth.AuthenticationProvider; +import org.xtreemfs.common.auth.UserCredentials; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.LifeCycleThread; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.mrc.MRCConfig; +import org.xtreemfs.mrc.MRCRequest; +import org.xtreemfs.mrc.PolicyContainer; +import org.xtreemfs.mrc.osdselection.OSDStatusManager; +import org.xtreemfs.mrc.slices.SliceManager; +import org.xtreemfs.mrc.utils.MessageUtils; + +public class BrainStage extends LifeCycleThread { + + private final LinkedBlockingQueue queue; + + private final Brain brain; + + public final Map _statMap; + + private boolean blocked; + + private AuthenticationProvider auth; + + public BrainStage(MRCConfig config, DIRClient client, OSDStatusManager osdStatusManager, + SliceManager slices, PolicyContainer policyContainer, AuthenticationProvider auth, + String authString) throws BrainException { + + super("Brain"); + + brain = new Brain(config, client, osdStatusManager, slices, policyContainer, authString); + this.queue = new LinkedBlockingQueue(); + + this.auth = auth; + + _statMap = new HashMap(); + } + + public void run() { + + Logging.logMessage(Logging.LEVEL_INFO, this, "operational"); + notifyStarted(); + + try { + for (;;) { + + MRCRequest request = null; + try { + if (isInterrupted()) + break; + request = 
queue.take(); + + // FIXME!!! + while (blocked) + Thread.sleep(2000); + } catch (InterruptedException e1) { + break; + } + + // initial request + if (request.details.context == null) { + + try { + + Object args = MessageUtils.unmarshallRequest(request); + + // parse the user Id from the "AUTHORIZATION" header + if (request.details.userId == null) { + String authHeader = request.getPinkyRequest().requestHeaders + .getHeader(HTTPHeaders.HDR_AUTHORIZATION); + + if (authHeader == null) + throw new UserException(ErrNo.EPERM, + "authorization mechanism required"); + + UserCredentials cred = null; + try { + cred = auth.getEffectiveCredentials(authHeader, request + .getPinkyRequest().getChannelIO()); + request.details.superUser = cred.isSuperUser(); + request.details.groupIds = cred.getGroupIDs(); + request.details.userId = cred.getUserID(); + } catch (AuthenticationException ex) { + throw new UserException(ErrNo.EPERM, ex.getMessage()); + } + + /* + * if (authHeader.startsWith("{")) { //new JSON + * header format + * + * String mech = null; String GUID = null; + * List GGIDs = null; try { JSONString + * authStr = new JSONString(authHeader); + * Map authInfo = (Map) JSONParser.parseJSON(authStr); mech = + * (String) authInfo.get("mechanism"); GUID = + * (String) authInfo.get("guid"); GGIDs = + * (List) authInfo.get("ggids"); } catch + * (Exception ex) { throw new + * UserException(ErrNo.EPERM, "malformed + * authentication credentials: "+ex); } + * + * if (!mech.equals("nullauth")) throw new + * UserException(ErrNo.EPERM, "unknown authorization + * mechanism: " + mech); + * + * request.userId = GUID; request.superUser = false; + * // FIXME: set 'true' if superuser! + * request.groupIds = GGIDs; } else { //old header + * format for comapatability! 
StringTokenizer st = + * new StringTokenizer( authHeader, " "); String + * mech = st.nextToken(); + * + * if (mech.equals("nullauth")) { + * + * if (!st.hasMoreTokens()) throw new + * UserException(ErrNo.EPERM, "nullauth: user ID + * required"); // set the user ID request.userId = + * st.nextToken(); + * + * if (!st.hasMoreTokens()) throw new + * UserException(ErrNo.EPERM, "nullauth: at least + * one group ID required"); // set the group IDs + * request.groupIds = new ArrayList(); while + * (st.hasMoreTokens()) + * request.groupIds.add(st.nextToken()); + * + * } + * + * else throw new UserException(ErrNo.EPERM, + * "unknown authorization mechanism: " + mech); } + */ + + } + + if (Logging.tracingEnabled()) { + Logging.logMessage(Logging.LEVEL_TRACE, this, "request: " + + request.getPinkyRequest()); + Logging.logMessage(Logging.LEVEL_TRACE, this, "command: " + + request.getPinkyRequest().requestURI); + Logging.logMessage(Logging.LEVEL_TRACE, this, "args: " + args); + } + + executeCommand(request, args); + + } catch (Exception exc) { + MessageUtils.marshallException(request, exc); + brain.notifyRequestListener(request); + } + + } + + // subsequent request + else { + + try { + + String subsequentMethod = (String) request.details.context + .get("nextMethod"); + + Method m = brain.getClass().getMethod(subsequentMethod, + new Class[] { MRCRequest.class }); + m.invoke(brain, request); + + } catch (InvocationTargetException exc) { + // BrainHelper.submitException(brain, request, + // exc.getCause()); + MessageUtils.marshallException(request, exc.getCause()); + brain.notifyRequestListener(request); + } catch (Exception exc) { + // BrainHelper.submitException(brain, request, exc); + MessageUtils.marshallException(request, exc); + brain.notifyRequestListener(request); + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + } + + } + + } + + } catch (Throwable th) { + notifyCrashed(th instanceof Exception ? 
(Exception) th : new Exception(th)); + return; + } + + Logging.logMessage(Logging.LEVEL_INFO, this, "shudtown complete"); + notifyStopped(); + } + + public void block() { + blocked = true; + } + + public void unblock() { + blocked = false; + } + + public void replayLogEntry(String cmd, String uid, String gid, Object args) throws Exception { + + // a 'move' command has to be handled explicitly; in case a remote MRC + // is involved, only local activity has to be replayed + // if(cmd.equals("move")) + // cmd = "replayMove"; + + PinkyRequest pr = new PinkyRequest(null, cmd, null, null); + MRCRequest request = new MRCRequest(pr); + request.details.userId = uid; + request.details.groupIds = new ArrayList(1); + request.details.groupIds.add(gid); + request.details.authorized = true; // important: override access control + + executeCommandSync(request, args); + } + + public long getTotalDBSize() throws BrainException { + return brain.getTotalDBSize(); + } + + public long getTotalNumberOfFiles() throws BrainException { + return brain.getTotalNumberOfFiles(); + } + + public long getTotalNumberOfDirs() throws BrainException { + return brain.getTotalNumberOfDirs(); + } + + public void shutdown() throws Exception { + interrupt(); + brain.shutdown(); + } + + public void processRequest(MRCRequest request) { + queue.add(request); + } + + public void checkpointDB() throws BrainException { + brain.checkpointDB(); + } + + public void completeDBCheckpoint() throws BrainException { + brain.completeDBCheckpoint(); + } + + public void restoreDB() throws BrainException { + brain.restoreDB(); + } + + public void setRequestListener(BrainRequestListener listener) { + assert (listener != null); + brain.setRequestListener(listener); + } + + private void executeCommand(MRCRequest request, Object args) { + + try { + + // convert the arguments to a corresponding object array + Object[] argArray = null; + if (args != null) + try { + List argList = (List) args; + argList.add(0, request); + argArray 
    private void executeCommandSync(MRCRequest request, Object args) throws Exception {

        // Synchronously executes the brain method named by the request URI,
        // used for log replay; unlike executeCommand, errors are rethrown to
        // the caller instead of being marshalled into the request.
        try {
            request.syncPseudoRequest = true;
            // convert the arguments to a corresponding object array
            Object[] argArray = null;
            if (args != null)
                try {
                    // a JSON argument list: the request becomes the first element
                    List argList = (List) args;
                    argList.add(0, request);
                    argArray = argList.toArray();
                } catch (ClassCastException exc) {
                    // a single (non-list) argument
                    argArray = new Object[] { request, args };
                }

            else
                // NOTE(review): when args is null, argArray remains null, so
                // findMethod below would dereference a null array; callers
                // apparently always pass non-null args (see replayLogEntry) —
                // confirm before relying on the null path
                args = new Object[] { request };

            // find the appropriate brain method
            Method m = findMethod(request.getPinkyRequest().requestURI, argArray);

            Logging.logMessage(Logging.LEVEL_DEBUG, this, "executing "
                + request.getPinkyRequest().requestURI + " with " + args);

            // invoke the brain method
            if (args == null)
                m.invoke(brain, request);
            else
                m.invoke(brain, argArray);

        } catch (InvocationTargetException exc) {
            // the invoked method failed: do not log the operation persistently
            request.details.persistentOperation = false;
            throw exc;
        } catch (Exception exc) {
            request.details.persistentOperation = false;
            throw exc;
        }
    }
+ + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.brain; + +/** + * POSIX error numbers. + * + * @author stender + */ +public final class ErrNo { + + public static final int EPERM = 1; /* Operation not permitted */ + + public static final int ENOENT = 2; /* No such file or directory */ + + public static final int ESRCH = 3;/* No such process */ + + public static final int EINTR = 4;/* Interrupted system call */ + + public static final int EIO = 5;/* I/O error */ + + public static final int ENXIO = 6; /* No such device or address */ + + public static final int E2BIG = 7; /* Argument list too long */ + + public static final int ENOEXEC = 8; /* Exec format error */ + + public static final int EBADF = 9; /* Bad file number */ + + public static final int ECHILD = 10; /* No child processes */ + + public static final int EAGAIN = 11; /* Try again */ + + public static final int ENOMEM = 12; /* Out of memory */ + + public static final int EACCES = 13; /* Permission denied */ + + public static final int EFAULT = 14; /* Bad address */ + + public static final int ENOTBLK = 15; /* Block device 
required */ + + public static final int EBUSY = 16; /* Device or resource busy */ + + public static final int EEXIST = 17; /* File exists */ + + public static final int EXDEV = 18; /* Cross-device link */ + + public static final int ENODEV = 19; /* No such device */ + + public static final int ENOTDIR = 20; /* Not a directory */ + + public static final int EISDIR = 21; /* Is a directory */ + + public static final int EINVAL = 22; /* Invalid argument */ + + public static final int ENFILE = 23; /* File table overflow */ + + public static final int EMFILE = 24; /* Too many open files */ + + public static final int ENOTTY = 25; /* Not a typewriter */ + + public static final int ETXTBSY = 26; /* Text file busy */ + + public static final int EFBIG = 27; /* File too large */ + + public static final int ENOSPC = 28; /* No space left on device */ + + public static final int ESPIPE = 29; /* Illegal seek */ + + public static final int EROFS = 30; /* Read-only file system */ + + public static final int EMLINK = 31; /* Too many links */ + + public static final int EPIPE = 32; /* Broken pipe */ + + public static final int EDOM = 33; /* + * Math argument out of domain of func + */ + + public static final int ERANGE = 34; /* Math result not representable */ + + public static final int EDEADLK = 35; /* Resource deadlock would occur */ + + public static final int ENAMETOOLONG = 36; /* File name too long */ + + public static final int ENOLCK = 37; /* No record locks available */ + + public static final int ENOSYS = 38; /* Function not implemented */ + + public static final int ENOTEMPTY = 39; /* Directory not empty */ + + public static final int ELOOP = 40; /* + * Too many symbolic links encountered + */ + + public static final int EWOULDBLOCK = EAGAIN; /* Operation would block */ + + public static final int ENOMSG = 42; /* No message of desired type */ + + public static final int EIDRM = 43; /* Identifier removed */ + + public static final int ECHRNG = 44; /* Channel number out of 
range */ + + public static final int EL2NSYNC = 45; /* Level 2 not synchronized */ + + public static final int EL3HLT = 46; /* Level 3 halted */ + + public static final int EL3RST = 47; /* Level 3 reset */ + + public static final int ELNRNG = 48; /* Link number out of range */ + + public static final int EUNATCH = 49; /* Protocol driver not attached */ + + public static final int ENOCSI = 50;/* No CSI structure available */ + + public static final int EL2HLT = 51; /* Level 2 halted */ + + public static final int EBADE = 52; /* Invalid exchange */ + + public static final int EBADR = 53; /* Invalid request descriptor */ + + public static final int EXFULL = 54; /* Exchange full */ + + public static final int ENOANO = 55; /* No anode */ + + public static final int EBADRQC = 56; /* Invalid request code */ + + public static final int EBADSLT = 57; /* Invalid slot */ + + public static final int EDEADLOCK = EDEADLK; + + public static final int EBFONT = 59; /* Bad font file format */ + + public static final int ENOSTR = 60; /* Device not a stream */ + + public static final int ENODATA = 61; /* No data available */ + + public static final int ETIME = 62; /* Timer expired */ + + public static final int ENOSR = 63; /* Out of streams resources */ + + public static final int ENONET = 64; /* Machine is not on the network */ + + public static final int ENOPKG = 65; /* Package not installed */ + + public static final int EREMOTE = 66; /* Object is remote */ + + public static final int ENOLINK = 67; /* Link has been severed */ + + public static final int EADV = 68; /* Advertise error */ + + public static final int ESRMNT = 69; /* Srmount error */ + + public static final int ECOMM = 70; /* Communication error on send */ + + public static final int EPROTO = 71; /* Protocol error */ + + public static final int EMULTIHOP = 72; /* Multihop attempted */ + + public static final int EDOTDOT = 73; /* RFS specific error */ + + public static final int EBADMSG = 74; /* Not a data message */ + + 
public static final int EOVERFLOW = 75; /* + * Value too large for defined data + * type + */ + + public static final int ENOTUNIQ = 76; /* Name not unique on network */ + + public static final int EBADFD = 77; /* File descriptor in bad state */ + + public static final int EREMCHG = 78; /* Remote address changed */ + + public static final int ELIBACC = 79; /* + * Can not access a needed shared + * library + */ + + public static final int ELIBBAD = 80; /* + * Accessing a corrupted shared + * library + */ + + public static final int ELIBSCN = 81; /* .lib section in a.out corrupted */ + + public static final int ELIBMAX = 82; /* + * Attempting to link in too many + * shared libraries + */ + + public static final int ELIBEXEC = 83; /* + * Cannot exec a shared library + * directly + */ + + public static final int EILSEQ = 84; /* Illegal byte sequence */ + + public static final int ERESTART = 85; /* + * Interrupted system call should be + * restarted + */ + + public static final int ESTRPIPE = 86; /* Streams pipe error */ + + public static final int EUSERS = 87; /* Too many users */ + + public static final int ENOTSOCK = 88; /* Socket operation on non-socket */ + + public static final int EDESTADDRREQ = 89; /* Destination address required */ + + public static final int EMSGSIZE = 90; /* Message too long */ + + public static final int EPROTOTYPE = 91; /* + * Protocol wrong type for + * socket + */ + + public static final int ENOPROTOOPT = 92; /* Protocol not available */ + + public static final int EPROTONOSUPPORT = 93; /* Protocol not supported */ + + public static final int ESOCKTNOSUPPORT = 94; /* Socket type not supported */ + + public static final int EOPNOTSUPP = 95; /* + * Operation not supported on + * transport endpoint + */ + + public static final int EPFNOSUPPORT = 96; /* Protocol family not supported */ + + public static final int EAFNOSUPPORT = 97; /* + * Address family not supported + * by protocol + */ + + public static final int EADDRINUSE = 98; /* Address 
already in use */ + + public static final int EADDRNOTAVAIL = 99; /* + * Cannot assign requested + * address + */ + + public static final int ENETDOWN = 100; /* Network is down */ + + public static final int ENETUNREACH = 101; /* Network is unreachable */ + + public static final int ENETRESET = 102; /* + * Network dropped connection + * because of reset + */ + + public static final int ECONNABORTED = 103; /* + * Software caused connection + * abort + */ + + public static final int ECONNRESET = 104; /* Connection reset by peer */ + + public static final int ENOBUFS = 105; /* No buffer space available */ + + public static final int EISCONN = 106; /* + * Transport endpoint is already + * connected + */ + + public static final int ENOTCONN = 107; /* + * Transport endpoint is not + * connected + */ + + public static final int ESHUTDOWN = 108; /* + * Cannot send after transport + * endpoint shutdown + */ + + public static final int ETOOMANYREFS = 109; /* + * Too many references: cannot + * splice + */ + + public static final int ETIMEDOUT = 110; /* Connection timed out */ + + public static final int ECONNREFUSED = 111; /* Connection refused */ + + public static final int EHOSTDOWN = 112; /* Host is down */ + + public static final int EHOSTUNREACH = 113;/* No route to host */ + + public static final int EALREADY = 114;/* Operation already in progress */ + + public static final int EINPROGRESS = 115;/* Operation now in progress */ + + public static final int ESTALE = 116; /* Stale NFS file handle */ + + public static final int EUCLEAN = 117; /* Structure needs cleaning */ + + public static final int ENOTNAM = 118; /* Not a XENIX named type file */ + + public static final int ENAVAIL = 119; /* No XENIX semaphores available */ + + public static final int EISNAM = 120; /* Is a named type file */ + + public static final int EREMOTEIO = 121; /* Remote I/O error */ + + public static final int EDQUOT = 122; /* Quota exceeded */ + + public static final int ENOMEDIUM = 123; /* No 
medium found */ + + public static final int EMEDIUMTYPE = 124; /* Wrong medium type */ + + public static final int ECANCELED = 125; /* Operation Canceled */ + + public static final int ENOKEY = 126; /* Required key not available */ + + public static final int EKEYEXPIRED = 127; /* Key has expired */ + + public static final int EKEYREVOKED = 128; /* Key has been revoked */ + + public static final int EKEYREJECTED = 129; /* Key was rejected by service */ + + /* for robust mutexes */ + public static final int EOWNERDEAD = 130; /* Owner died */ + + public static final int ENOTRECOVERABLE = 131; /* State not recoverable */ + + public static String getErrorMessage(int errorCode) { + + switch (errorCode) { + case EPERM : return "EPERM - Operation not permitted"; + case EBUSY : return "EBUSY - Device or resource busy"; + case ENOENT : return "ENOENT - No such file or directory"; + default : return "error code "+errorCode; + } + + } + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/Path.java b/servers/src/org/xtreemfs/mrc/brain/Path.java new file mode 100644 index 0000000000000000000000000000000000000000..ccb99d6f545ee751bac75dd6932ad4c5b929729d --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/Path.java @@ -0,0 +1,115 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.brain; + +/** + * Parses a path separated by '/' into multiple sections. + * + * @author stender + * + */ +public class Path { + + private String volume; + + private String innerPart; + + private String lastPart; + + private String pathWithoutVolume; + + private String path; + + public Path(String volume, String innerPart, String lastPart) { + this.volume = volume; + this.innerPart = innerPart; + this.lastPart = lastPart; + + pathWithoutVolume = innerPart + "/" + lastPart; + path = volume + + "/" + + (innerPart.isEmpty() ? lastPart + : (innerPart + "/" + lastPart)); + } + + public Path(String path) { + + this.path = path; + + StringBuffer sb = new StringBuffer(); + for (int i = 0; i < path.length() && path.charAt(i) != '/'; i++) + sb.append(path.charAt(i)); + + volume = sb.toString(); + + sb = new StringBuffer(); + for (int i = path.length() - 1; i >= volume.length() + && path.charAt(i) != '/'; i--) + sb.append(path.charAt(i)); + + lastPart = sb.reverse().toString(); + + sb = new StringBuffer(); + for (int i = volume.length() + 1; i <= path.length() + - lastPart.length() - 1; i++) { + sb.append(path.charAt(i)); + } + + innerPart = sb.toString(); + + for (int i = path.length() - lastPart.length(); i < path.length(); i++) + sb.append(path.charAt(i)); + + pathWithoutVolume = sb.toString(); + } + + public String getVolumeName() { + return volume; + } + + public String getInnerPath() { + return innerPart; + } + + public String getPathWithoutVolume() { + return pathWithoutVolume; + } + + public String getLastPathComponent() { + return 
lastPart; + } + + public static void main(String[] args) { + Path path = new Path("myVolume/test/blub/bla.txt"); + System.out.println(path.getVolumeName() + "\n" + path.getInnerPath() + + "\n" + path.getLastPathComponent() + "\n" + + path.getPathWithoutVolume()); + } + + public String toString() { + return path; + } + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/UserException.java b/servers/src/org/xtreemfs/mrc/brain/UserException.java new file mode 100644 index 0000000000000000000000000000000000000000..355a79f1830779c542f17eab7572d464af4b9949 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/UserException.java @@ -0,0 +1,59 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.brain; + +/** + * This exception is thrown if something + * + * @author bjko, stender + */ +public class UserException extends java.lang.Exception { + + private int errno = 0; + + /** + * Creates a new instance of XtreemFSException without detail + * message. 
+ */ + public UserException() { + } + + public UserException(int errno) { + this.errno = errno; + } + + public UserException(String message) { + super(message); + } + + public UserException(int errno, String message) { + super(message + " (errno=" + errno + ")"); + this.errno = errno; + } + + public int getErrno() { + return this.errno; + } +} diff --git a/servers/src/org/xtreemfs/mrc/brain/VolumeChangeListener.java b/servers/src/org/xtreemfs/mrc/brain/VolumeChangeListener.java new file mode 100644 index 0000000000000000000000000000000000000000..61c912f6c107599b663052a7ff5aac4e3eff80d0 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/VolumeChangeListener.java @@ -0,0 +1,39 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.brain; + +import java.io.IOException; + +import org.xtreemfs.mrc.slices.VolumeInfo; + +public interface VolumeChangeListener { + + public static final int MOD_CHANGED = 1; + + public static final int MOD_DELETED = 2; + + public void volumeChanged(int mod, VolumeInfo vol) throws IOException; + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/ACL.java b/servers/src/org/xtreemfs/mrc/brain/metadata/ACL.java new file mode 100644 index 0000000000000000000000000000000000000000..3af6e0d5bd955a2bd7e30dd3fa0b06688b1e3dea --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/metadata/ACL.java @@ -0,0 +1,88 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.brain.metadata; + +import java.util.Iterator; + +/** + * Interface for accessing an Access Control List. Access Control Lists may be + * assigned to files and directories. + */ +public interface ACL extends Metadata { + + /** + * Represents an ACL entry. 
+ */ + public static interface Entry { + + public String getEntity(); + + public int getRights(); + } + + /** + * Returns an iterator for all entries. + * + * @return an iterator + */ + public Iterator iterator(); + + /** + * Returns the number of entries stored in the ACL. + * + * @return the number of entries + */ + public int getEntryCount(); + + /** + * Returns the access rights associated with the entity. + * + * @param entity + * the entity + * @return the access rights associated with the entity, or + * null if the entity does not exist + */ + public Integer getRights(String entity); + + /** + * Modifies an entry. If the entity does not exist, a new entry will be + * created. + * + * @param entity + * the entity + * @param rights + * the access rights + */ + public void editEntry(String entity, int rights); + + /** + * Deletes an existing entry. Does nothing if the entity does not exist. + * + * @param entity + * the entity of the entry to delete + */ + public void deleteEntry(String entity); + +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedACL.java b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedACL.java new file mode 100644 index 0000000000000000000000000000000000000000..9649b525f614e61a1e640c18657b37e6f54d19df --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedACL.java @@ -0,0 +1,244 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.brain.metadata; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; + +import org.xtreemfs.common.buffer.ASCIIString; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; + +public class BufferBackedACL extends BufferBackedMetadata implements ACL { + + public static class Entry implements ACL.Entry { + + private String entity; + + private int rights; + + public Entry(String entity, int rights) { + this.entity = entity; + this.rights = rights; + } + + public String getEntity() { + return entity; + } + + public int getRights() { + return rights; + } + + public String toString() { + return "(" + entity + "=" + rights + ")"; + } + } + + public BufferBackedACL(ReusableBuffer buffer, boolean copy, boolean freeOnDestroy) { + super(buffer, copy, freeOnDestroy); + } + + public BufferBackedACL(String[] entities, int[] rights) { + + super(null, false, true); + + // create a sorted list of entries + List list = new ArrayList(entities.length); + for (int i = 0; i < entities.length; i++) + list.add(new Entry(entities[i], rights[i])); + Collections.sort(list, new Comparator() { + public int compare(Entry o1, Entry o2) { + return o1.entity.compareTo(o2.entity); + } + }); + + // determine required buffer size + int bufSize = 4; // number of entries stored in first 4 bytes + for (String entity : entities) + // length + 4 len bytes + 4 rights bytes + bufSize += entity.length() + 8; + + // allocate a new buffer + buffer = 
BufferPool.allocate(bufSize); + buffer.putInt(entities.length); + + // fill the buffer with the sorted list + for (Entry entry : list) { + buffer.putString(entry.entity); + buffer.putInt(entry.rights); + } + } + + public Iterator iterator() { + + return new Iterator() { + + private int count = 0; + + private int index = 4; + + public boolean hasNext() { + return count < getEntryCount(); + } + + public Entry next() { + + buffer.position(index); + Entry entry = new Entry(buffer.getString(), buffer.getInt()); + index = buffer.position(); + count++; + + return entry; + } + + public void remove() { + throw new UnsupportedOperationException("remove not implemented"); + } + }; + } + + public int getEntryCount() { + buffer.position(0); + return buffer.getInt(); + } + + public Integer getRights(String entity) { + + int index = getIndexPosition(entity); + if (index == -1) + return null; + + buffer.position(index); + buffer.position(index + buffer.getInt() + 4); + return buffer.getInt(); + } + + public void editEntry(String entity, int rights) { + + // first, find the position and check whether an insert operation is + // necessary + int index = 4; + boolean insert = false; + for (;;) { + assert (index <= buffer.limit()); + + if (index == buffer.limit()) { + insert = true; + break; + } + + buffer.position(index); + ASCIIString ent = buffer.getBufferBackedASCIIString(); + + if (ent.toString().compareTo(entity) < 0) + index += getEntrySize(index); + else if (ent.toString().compareTo(entity) == 0) { + insert = false; + break; + } else { + insert = true; + break; + } + } + + // if no insert operation is necessary, simply replace the rights string + if (!insert) + buffer.putInt(rights); + + // otherwise, create a buffer containing the new entry and insert it + else { + // determine the size for the entry buffer + final int size = entity.length() + 8; + + // create and fill the buffer + ReusableBuffer tmp = BufferPool.allocate(size); + tmp.putString(entity); + tmp.putInt(rights); + 
+ // insert the buffer + insert(index, tmp); + + // update the entry count + int entryCount = getEntryCount(); + buffer.position(0); + buffer.putInt(entryCount + 1); + } + + } + + public void deleteEntry(String entity) { + + int index = getIndexPosition(entity); + if (index == -1) + return; + + // determine the entry size + buffer.position(index); + final int count = buffer.getInt() + 8; + + // delete the entry + delete(index, count); + + // update the entry count + int entryCount = getEntryCount(); + buffer.position(0); + buffer.putInt(entryCount - 1); + } + + private int getEntrySize(int index) { + + buffer.position(index); + int len = buffer.getInt(); + assert (len > 0); + + return len + 8; + } + + private int getIndexPosition(String entity) { + + int index = 4; + for (;;) { + assert (index <= buffer.limit()); + + if (index == buffer.limit()) + return -1; + + buffer.position(index); + ASCIIString ent = buffer.getBufferBackedASCIIString(); + + if (ent.toString().compareTo(entity) < 0) + index += getEntrySize(index); + else if (ent.toString().compareTo(entity) == 0) + return index; + else + return -1; + } + } + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedDirObject.java b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedDirObject.java new file mode 100644 index 0000000000000000000000000000000000000000..66065fc0e02c949a2a376def6975426f2a5d7d84 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedDirObject.java @@ -0,0 +1,126 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.brain.metadata; + +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; + +public class BufferBackedDirObject extends BufferBackedFSObject implements FSObject { + + private static final int DYNAMIC_PART_INDEX = 20; + + public BufferBackedDirObject(ReusableBuffer buffer, boolean copy, boolean freeOnDestroy) { + super(buffer, copy, freeOnDestroy); + } + + public BufferBackedDirObject(long id, int atime, int ctime, int mtime, String ownerID, + String owningGroupId, BufferBackedACL acl, BufferBackedStripingPolicy defaultSP, + String linkTarget, BufferBackedXAttrs xattrs) { + + super(null, false, true); + + // allocate a new buffer from the pool + buffer = BufferPool.allocate(DYNAMIC_PART_INDEX + ownerID.getBytes().length + 4 + + owningGroupId.getBytes().length + 4 + + (linkTarget == null ? 4 : linkTarget.getBytes().length + 4) + + (acl == null ? 4 : acl.size() + 4) + (defaultSP == null ? 4 : defaultSP.size() + 4) + + (xattrs == null ? 
4 : xattrs.size()) + 4); + + // fill the buffer with the given data + buffer.position(0); + buffer.putLong(id); + buffer.putInt(atime); + buffer.putInt(ctime); + buffer.putInt(mtime); + buffer.putString(ownerID); + buffer.putString(owningGroupId); + + if (acl != null) { + buffer.putInt(acl.size()); + acl.getBuffer().position(0); + buffer.put(acl.getBuffer()); + } else { + buffer.putInt(0); + } + + if (defaultSP != null) { + buffer.putInt(defaultSP.size()); + defaultSP.getBuffer().position(0); + buffer.put(defaultSP.getBuffer()); + } else { + buffer.putInt(0); + } + + buffer.putString(linkTarget); + + if (xattrs != null) { + buffer.putInt(xattrs.size()); + xattrs.getBuffer().position(0); + buffer.put(xattrs.getBuffer()); + } else { + buffer.putInt(0); + } + } + + protected int getFixedBufferIndex(int fixedAttrIndex) { + + switch (fixedAttrIndex) { + case ID: + return 0; + case ATIME: + return 8; + case CTIME: + return 12; + case MTIME: + return 16; + default: + throw new IllegalArgumentException("invalid index: " + fixedAttrIndex); + } + + } + + protected int getDynamicBufferStartIndex() { + return DYNAMIC_PART_INDEX; + } + + protected int getDynamicIndex(int attr) { + switch (attr) { + case OWNER: + return 0; + case GROUP: + return 1; + case ACL: + return 2; + case SP: + return 3; + case LINKTRG: + return 4; + case XATTRS: + return 5; + default: + throw new IllegalArgumentException("invalid attribute: " + attr); + } + } +} diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedFSObject.java b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedFSObject.java new file mode 100644 index 0000000000000000000000000000000000000000..f9c0aabb0f5748c41d65960cc202a8f382ce4310 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedFSObject.java @@ -0,0 +1,369 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. 
XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.brain.metadata; + +import org.xtreemfs.common.buffer.ASCIIString; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; + +public abstract class BufferBackedFSObject extends BufferBackedMetadata { + + protected static final int ID = 0; + + protected static final int ATIME = 1; + + protected static final int CTIME = 2; + + protected static final int MTIME = 3; + + protected static final int SIZE = 4; + + protected static final int LINKCOUNT = 5; + + protected static final int EPOCH = 6; + + protected static final int ISSEPOCH = 7; + + protected static final int READONLY = 8; + + protected static final int OWNER = 9; + + protected static final int GROUP = 10; + + protected static final int ACL = 11; + + protected static final int XLOC = 12; + + protected static final int SP = 13; + + protected static final int LINKTRG = 14; + + protected static final int XATTRS = 15; + + private ASCIIString ownerIDString; + + private ASCIIString owningGroupIDString; + + private ASCIIString linkTargetString; + + private BufferBackedACL acl; + + private 
BufferBackedXAttrs xattrs; + + private BufferBackedStripingPolicy sp; + + protected BufferBackedFSObject(ReusableBuffer buffer, boolean copy, boolean freeOnDestroy) { + super(buffer, copy, freeOnDestroy); + } + + /** + * Returns all internally allocated buffers. + */ + public void destroy() { + + if (sp != null) + sp.destroy(); + if (xattrs != null) + xattrs.destroy(); + if (acl != null) + acl.destroy(); + + super.destroy(); + } + + public long getId() { + buffer.position(getFixedBufferIndex(ID)); + return buffer.getLong(); + } + + public void setId(long id) { + buffer.position(getFixedBufferIndex(ID)); + buffer.putLong(id); + } + + public int getAtime() { + buffer.position(getFixedBufferIndex(ATIME)); + return buffer.getInt(); + } + + public void setAtime(int atime) { + buffer.position(getFixedBufferIndex(ATIME)); + buffer.putInt(atime); + } + + public int getCtime() { + buffer.position(getFixedBufferIndex(CTIME)); + return buffer.getInt(); + } + + public void setCtime(int ctime) { + buffer.position(getFixedBufferIndex(CTIME)); + buffer.putInt(ctime); + } + + public int getMtime() { + buffer.position(getFixedBufferIndex(MTIME)); + return buffer.getInt(); + } + + public void setMtime(int mtime) { + buffer.position(getFixedBufferIndex(MTIME)); + buffer.putInt(mtime); + } + + public ASCIIString getOwnerId() { + + if (ownerIDString == null) + ownerIDString = getAttrString(getDynamicIndex(OWNER)); + + return ownerIDString; + } + + public void setOwnerId(String ownerId) { + + assert (ownerId != null); + + int offset = getDynamicBufferIndex(getDynamicIndex(OWNER)); + + ASCIIString currentOwner = getOwnerId(); + ReusableBuffer buf = BufferPool.allocate(ownerId.length() + 4); + buf.putString(ownerId); + + delete(offset, currentOwner.toString().length() + 4); + insert(offset, buf); + + ownerIDString = null; + BufferPool.free(buf); + } + + public ASCIIString getOwningGroupId() { + + if (owningGroupIDString == null) + owningGroupIDString = 
getAttrString(getDynamicIndex(GROUP)); + + return owningGroupIDString; + } + + public void setOwningGroupId(String groupId) { + + assert (groupId != null); + + int offset = getDynamicBufferIndex(getDynamicIndex(GROUP)); + + ASCIIString currentGroup = getOwningGroupId(); + ReusableBuffer buf = BufferPool.allocate(groupId.length() + 4); + buf.putString(groupId); + + delete(offset, currentGroup.toString().length() + 4); + insert(offset, buf); + + owningGroupIDString = null; + BufferPool.free(buf); + } + + public ASCIIString getLinkTarget() { + + if (linkTargetString == null) + linkTargetString = getAttrString(getDynamicIndex(LINKTRG)); + + return linkTargetString; + } + + public void setLinkTarget(String linkTarget) { + + int offset = getDynamicBufferIndex(getDynamicIndex(LINKTRG)); + + ASCIIString currentLinkTarget = getLinkTarget(); + ReusableBuffer buf = BufferPool + .allocate((linkTarget == null ? 0 : linkTarget.length()) + 4); + buf.putString(linkTarget); + + delete(offset, (currentLinkTarget == null ? 0 : currentLinkTarget.toString().length()) + 4); + insert(offset, buf); + + linkTargetString = null; + BufferPool.free(buf); + } + + public BufferBackedACL getAcl() { + + if (acl == null) { + ReusableBuffer buf = getAttrBuffer(getDynamicIndex(ACL)); + if (buf != null) + acl = new BufferBackedACL(buf, false, true); + } + + return acl; + } + + public void setACL(ACL acl) { + + assert (acl == null || acl instanceof BufferBackedACL); + + BufferBackedACL newACL = (BufferBackedACL) acl; + BufferBackedACL currentACL = getAcl(); + int offset = getDynamicBufferIndex(getDynamicIndex(ACL)); + + // remove the current entity + delete(offset + 4, currentACL == null ? 
0 : currentACL.size()); + this.acl.destroy(); + this.acl = null; + + // update the entity size + buffer.position(offset); + buffer.putInt(newACL.size()); + + // insert the new entity + insert(offset + 4, newACL.getBuffer()); + } + + public BufferBackedStripingPolicy getStripingPolicy() { + + if (sp == null) { + ReusableBuffer buf = getAttrBuffer(getDynamicIndex(SP)); + if (buf != null) + sp = new BufferBackedStripingPolicy(buf, false, true); + } + + return sp; + } + + public void setStripingPolicy(StripingPolicy sp) { + + assert (sp == null || sp instanceof BufferBackedStripingPolicy); + + BufferBackedStripingPolicy newSP = (BufferBackedStripingPolicy) sp; + BufferBackedStripingPolicy currentSP = getStripingPolicy(); + int offset = getDynamicBufferIndex(getDynamicIndex(SP)); + + // remove the current entity + delete(offset + 4, currentSP == null ? 0 : currentSP.size()); + this.sp.destroy(); + this.sp = null; + + // update the entity size + buffer.position(offset); + buffer.putInt(newSP.size()); + + // insert the new entity + insert(offset + 4, newSP.getBuffer()); + } + + public BufferBackedXAttrs getXAttrs() { + + if (xattrs == null) { + ReusableBuffer buf = getAttrBuffer(getDynamicIndex(XATTRS)); + if (buf != null) + xattrs = new BufferBackedXAttrs(buf, false, true); + } + + return xattrs; + } + + public void setXAttrs(XAttrs xattrs) { + + assert (xattrs == null || xattrs instanceof BufferBackedXAttrs); + + BufferBackedXAttrs newXAttrs = (BufferBackedXAttrs) xattrs; + BufferBackedXAttrs currentXAttrs = getXAttrs(); + int offset = getDynamicBufferIndex(getDynamicIndex(XATTRS)); + + // remove the current entity + delete(offset + 4, currentXAttrs == null ? 
0 : currentXAttrs.size()); + this.xattrs.destroy(); + this.xattrs = null; + + // update the entity size + buffer.position(offset); + buffer.putInt(newXAttrs.size()); + + // insert the new entity + insert(offset + 4, newXAttrs.getBuffer()); + } + + protected ASCIIString getAttrString(int attrIndex) { + + // find the correct index position in the buffer + int index = getDynamicBufferIndex(attrIndex); + buffer.position(index); + + // total length = string length + # length bytes + int len = buffer.getInt() + 4; + + // if string length == -1, return null string + if (len == 3) + return null; + + // create the string from a view buffer + ReusableBuffer buf = buffer.createViewBuffer(); + buf.range(index, len); + buf.position(0); + ASCIIString string = buf.getBufferBackedASCIIString(); + BufferPool.free(buf); + + return string; + } + + protected ReusableBuffer getAttrBuffer(int attrIndex) { + + // find the correct index position in the buffer + int index = getDynamicBufferIndex(attrIndex); + buffer.position(index); + + int len = buffer.getInt(); + assert (len >= 0); + + if (len == 0) + return null; + + // create the target object from a view buffer (skip the len bytes) + ReusableBuffer buf = buffer.createViewBuffer(); + buf.range(index + 4, len); + buf.position(0); + + return buf; + } + + protected int getDynamicBufferIndex(int dynAttrIndex) { + + int index = getDynamicBufferStartIndex(); + for (int i = 0; i < dynAttrIndex; i++) { + buffer.position(index); + int len = buffer.getInt(); + assert (len >= -1); + + index += 4 + (len > -1 ? 
len : 0); + } + + return index; + } + + protected abstract int getFixedBufferIndex(int fixedAttrIndex); + + protected abstract int getDynamicBufferStartIndex(); + + protected abstract int getDynamicIndex(int attr); + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedFileObject.java b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedFileObject.java new file mode 100644 index 0000000000000000000000000000000000000000..f97729f485f94bab76da98d983784a53940b9e79 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedFileObject.java @@ -0,0 +1,248 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.brain.metadata; + +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; + +public class BufferBackedFileObject extends BufferBackedFSObject implements FileObject { + + private static final int DYNAMIC_PART_INDEX = 39; + + private BufferBackedXLocList xLocList; + + public BufferBackedFileObject(ReusableBuffer buffer, boolean copy, boolean freeOnDestroy) { + super(buffer, copy, freeOnDestroy); + } + + public BufferBackedFileObject(long id, int atime, int ctime, int mtime, long size, + short linkcount, int epoch, int issuedEpoch, boolean readonly, String ownerID, + String owningGroupId, BufferBackedACL acl, BufferBackedXLocList xlocList, + BufferBackedStripingPolicy sp, String linkTarget, BufferBackedXAttrs xattrs) { + + super(null, false, true); + + // allocate a new buffer from the pool + buffer = BufferPool.allocate(DYNAMIC_PART_INDEX + ownerID.getBytes().length + 4 + + owningGroupId.getBytes().length + 4 + (xlocList == null ? 4 : xlocList.size() + 4) + + (linkTarget == null ? 4 : linkTarget.getBytes().length + 4) + + (acl == null ? 4 : acl.size() + 4) + (sp == null ? 4 : sp.size() + 4) + + (xattrs == null ? 
4 : xattrs.size()) + 4); + + // fill the buffer with the given data + buffer.position(0); + buffer.putLong(id); + buffer.putInt(atime); + buffer.putInt(ctime); + buffer.putInt(mtime); + buffer.putLong(size); + buffer.putShort(linkcount); + buffer.putInt(epoch); + buffer.putInt(issuedEpoch); + buffer.putBoolean(readonly); + buffer.putString(ownerID); + buffer.putString(owningGroupId); + + if (acl != null) { + buffer.putInt(acl.size()); + acl.getBuffer().position(0); + buffer.put(acl.getBuffer()); + } else { + buffer.putInt(0); + } + + if (xlocList != null) { + buffer.putInt(xlocList.size()); + xlocList.getBuffer().position(0); + buffer.put(xlocList.getBuffer()); + } else { + buffer.putInt(0); + } + + if (sp != null) { + buffer.putInt(sp.size()); + sp.getBuffer().position(0); + buffer.put(sp.getBuffer()); + } else { + buffer.putInt(0); + } + + buffer.putString(linkTarget); + + if (xattrs != null) { + buffer.putInt(xattrs.size()); + xattrs.getBuffer().position(0); + buffer.put(xattrs.getBuffer()); + } else { + buffer.putInt(0); + } + } + + /** + * Returns all internally allocated buffers. 
+ */ + public void destroy() { + + if (xLocList != null) + xLocList.destroy(); + + super.destroy(); + } + + public long getSize() { + buffer.position(getFixedBufferIndex(SIZE)); + return buffer.getLong(); + } + + public void setSize(long size) { + buffer.position(getFixedBufferIndex(SIZE)); + buffer.putLong(size); + } + + public short getLinkCount() { + buffer.position(getFixedBufferIndex(LINKCOUNT)); + return buffer.getShort(); + } + + public void setLinkCount(short count) { + buffer.position(getFixedBufferIndex(LINKCOUNT)); + buffer.putShort(count); + } + + public int getEpoch() { + buffer.position(getFixedBufferIndex(EPOCH)); + return buffer.getInt(); + } + + public void setEpoch(int epoch) { + buffer.position(getFixedBufferIndex(EPOCH)); + buffer.putInt(epoch); + } + + public int getIssuedEpoch() { + buffer.position(getFixedBufferIndex(ISSEPOCH)); + return buffer.getInt(); + } + + public void setIssuedEpoch(int epoch) { + buffer.position(getFixedBufferIndex(ISSEPOCH)); + buffer.putInt(epoch); + } + + public boolean isReadOnly() { + buffer.position(getFixedBufferIndex(READONLY)); + return buffer.getBoolean(); + } + + public void setReadOnly(boolean readOnly) { + buffer.position(getFixedBufferIndex(READONLY)); + buffer.putBoolean(readOnly); + } + + public BufferBackedXLocList getXLocList() { + + if (xLocList == null) { + ReusableBuffer buf = getAttrBuffer(getDynamicIndex(XLOC)); + if (buf != null) + xLocList = new BufferBackedXLocList(buf, false, true); + } + + return xLocList; + } + + public void setXLocList(XLocList xlocList) { + + assert (xlocList instanceof BufferBackedXLocList); + + BufferBackedXLocList newXLoc = (BufferBackedXLocList) xlocList; + BufferBackedXLocList currentXLoc = getXLocList(); + int offset = getDynamicBufferIndex(getDynamicIndex(XLOC)); + + // remove the current entity + delete(offset + 4, currentXLoc == null ? 
0 : currentXLoc.size()); + this.xLocList.destroy(); + this.xLocList = null; + + // update the entity size + buffer.position(offset); + buffer.putInt(newXLoc.size()); + + // insert the new entity + insert(offset + 4, newXLoc.getBuffer()); + } + + protected int getFixedBufferIndex(int fixedAttrIndex) { + + switch (fixedAttrIndex) { + case ID: + return 0; + case ATIME: + return 8; + case CTIME: + return 12; + case MTIME: + return 16; + case SIZE: + return 20; + case LINKCOUNT: + return 28; + case EPOCH: + return 30; + case ISSEPOCH: + return 34; + case READONLY: + return 38; + default: + throw new IllegalArgumentException("invalid index: " + fixedAttrIndex); + } + + } + + protected int getDynamicBufferStartIndex() { + return DYNAMIC_PART_INDEX; + } + + protected int getDynamicIndex(int attr) { + switch (attr) { + case OWNER: + return 0; + case GROUP: + return 1; + case ACL: + return 2; + case XLOC: + return 3; + case SP: + return 4; + case LINKTRG: + return 5; + case XATTRS: + return 6; + default: + throw new IllegalArgumentException("invalid attribute: " + attr); + } + } + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedMetadata.java b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedMetadata.java new file mode 100644 index 0000000000000000000000000000000000000000..5ce67217eba4c438789832874ca88b6dd83e4f39 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedMetadata.java @@ -0,0 +1,190 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
package org.xtreemfs.mrc.brain.metadata;

import org.xtreemfs.common.buffer.BufferPool;
import org.xtreemfs.common.buffer.ReusableBuffer;
import org.xtreemfs.common.util.OutputUtils;

/**
 * Base class for all metadata objects that are serialized into a single
 * {@link ReusableBuffer}. It provides the low-level splice operations
 * ({@link #insert(int, ReusableBuffer)} and {@link #delete(int, int)}) that
 * subclasses use to replace variable-length entries in place.
 */
public abstract class BufferBackedMetadata {

    // the backing buffer holding the serialized metadata
    protected ReusableBuffer buffer;

    // whether destroy() returns the backing buffer to the pool
    protected boolean freeOnDestroy;

    /**
     * Creates a new metadata object backed by the given buffer.
     *
     * @param buffer
     *            the backing buffer
     * @param copy
     *            specifies whether a copy of the backing buffer will be created
     *            and used
     * @param freeOnDestroy
     *            specifies whether the backing buffer will be freed when
     *            destroy() is invoked
     */
    protected BufferBackedMetadata(ReusableBuffer buffer, boolean copy, boolean freeOnDestroy) {

        this.freeOnDestroy = freeOnDestroy;

        if (copy) {
            buffer.position(0);
            this.buffer = BufferPool.allocate(buffer.limit());
            this.buffer.put(buffer);
        } else
            this.buffer = buffer;
    }

    /**
     * Returns the backing buffer.
     *
     * @return the backing buffer
     */
    public ReusableBuffer getBuffer() {
        return buffer;
    }

    /**
     * Destroys the metadata object. Depending on whether
     * freeOnDestroy was true when creating the metadata object,
     * the backing buffer will also be freed. On destroy(),
     * derived classes should additionally free any buffer that has been created
     * internally.
     *
     */
    public void destroy() {
        if (freeOnDestroy && buffer != null)
            BufferPool.free(buffer);
    }

    /**
     * Returns the size of the backing buffer.
     *
     * @return the size of the backing buffer
     */
    public int size() {
        return buffer.capacity();
    }

    /**
     * Inserts a buffer in the backing buffer at a given offset; all data from
     * the offset onwards is shifted towards the end to make room.
     *
     * @param offset
     *            the offset
     * @param buf
     *            the buffer to insert
     */
    protected void insert(int offset, ReusableBuffer buf) {

        buf.position(0);
        buffer.position(0);

        // if the existing buffer has enough capacity, use it ...
        if (buffer.capacity() >= buffer.limit() + buf.limit()) {

            // create a view buffer encapsulating the trailing part of the
            // backing buffer
            ReusableBuffer tmp = buffer.createViewBuffer();
            tmp.range(offset, buffer.limit() - offset);
            tmp.position(0);

            // shift the limit
            buffer.limit(buffer.limit() + buf.limit());

            // copy the view buffer to the new end of the backing buffer
            // NOTE(review): if createViewBuffer() shares the underlying memory
            // with the backing buffer, this forward copy operates on
            // overlapping regions and may corrupt the tail when the shift
            // (buf.limit()) is smaller than the shifted region — confirm the
            // ReusableBuffer view semantics
            buffer.position(offset + buf.limit());
            buffer.put(tmp);
            BufferPool.free(tmp);

            // insert the argument buffer at the offset
            buffer.position(offset);
            buffer.put(buf);
        }

        // otherwise, allocate and fill new sufficiently sized buffer
        else {

            // allocate a new sufficiently-sized buffer
            ReusableBuffer newBuf = BufferPool.allocate(buffer.limit() + buf.limit());

            // create a view buffer from 0 to offset
            ReusableBuffer tmp = buffer.createViewBuffer();
            tmp.range(0, offset);
            tmp.position(0);

            // copy the leading buffer to the new buffer
            newBuf.put(tmp);
            BufferPool.free(tmp);

            // insert the argument buffer
            newBuf.put(buf);

            // create view buffer from offset to limit
            tmp = buffer.createViewBuffer();
            tmp.range(offset, buffer.limit() - offset);
            tmp.position(0);

            // insert the trailing buffer
            newBuf.put(tmp);
            BufferPool.free(tmp);

            // replace the backing buffer w/ the new buffer, and relinquish the
            // old backing buffer's resources
            BufferPool.free(buffer);
            buffer = newBuf;
        }
    }

    /**
     * Deletes the given amount of bytes at the given offset in the backing
     * buffer. The trailing data is copied over the deleted region, and the
     * buffer's limit is shrunk accordingly (via flip()).
     *
     * @param offset
     *            the offset at which to delete the bytes
     * @param count
     *            the amount of bytes to delete
     */
    protected void delete(int offset, int count) {

        ReusableBuffer tmp = buffer.createViewBuffer();
        tmp.range(offset + count, buffer.limit() - (offset + count));
        tmp.position(0);

        buffer.position(offset);
        buffer.put(tmp);
        buffer.flip();

        BufferPool.free(tmp);
    }

    /**
     * Generates a formatted hex string from the backing buffer.
     */
    public String toString() {
        return OutputUtils.byteArrayToFormattedHexString(buffer.array());
    }

}
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.brain.metadata; + +import org.xtreemfs.common.buffer.ASCIIString; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; + +public class BufferBackedStripingPolicy extends BufferBackedMetadata implements StripingPolicy { + + private static final int SIZE_INDEX = 0; + + private static final int WIDTH_INDEX = 4; + + private static final int DYNAMIC_PART_INDEX = 8; + + private ASCIIString pattern; + + public BufferBackedStripingPolicy(ReusableBuffer buffer, boolean copy, boolean freeOnDestroy) { + super(buffer, copy, freeOnDestroy); + } + + public BufferBackedStripingPolicy(String pattern, int stripeSize, int width) { + + super(null, false, true); + + buffer = BufferPool.allocate(pattern.getBytes().length + 12); + buffer.putInt(stripeSize); + buffer.putInt(width); + buffer.putString(pattern); + } + + public ASCIIString getPattern() { + + if (pattern == null) { + + buffer.position(DYNAMIC_PART_INDEX); + int len = buffer.getInt() + 4; + + assert (len > 3); + ReusableBuffer tmpBuf = buffer.createViewBuffer(); + tmpBuf.range(DYNAMIC_PART_INDEX, len); + tmpBuf.position(0); + pattern = tmpBuf.getBufferBackedASCIIString(); + BufferPool.free(tmpBuf); + } + + return pattern; + } + + public void setPattern(String pattern) { + + buffer.position(DYNAMIC_PART_INDEX); + int strLen = buffer.getInt(); + delete(DYNAMIC_PART_INDEX, strLen + 4); + + ReusableBuffer newBuf = BufferPool.allocate(pattern.length() + 4); + newBuf.putString(pattern); + + insert(DYNAMIC_PART_INDEX, newBuf); + BufferPool.free(newBuf); + + this.pattern = null; + } + + public int getStripeSize() { + buffer.position(SIZE_INDEX); + return buffer.getInt(); + } + + public void setStripeSize(int stripeSize) { + buffer.position(SIZE_INDEX); + 
buffer.putInt(stripeSize); + } + + public int getWidth() { + buffer.position(WIDTH_INDEX); + return buffer.getInt(); + } + + public void setWidth(int width) { + buffer.position(WIDTH_INDEX); + buffer.putInt(width); + } + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedXAttrs.java b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedXAttrs.java new file mode 100644 index 0000000000000000000000000000000000000000..17ba18d3f06358f7e5b233f69885fa0edd74a81b --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedXAttrs.java @@ -0,0 +1,247 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
package org.xtreemfs.mrc.brain.metadata;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;

import org.xtreemfs.common.buffer.ASCIIString;
import org.xtreemfs.common.buffer.BufferPool;
import org.xtreemfs.common.buffer.ReusableBuffer;

/**
 * Buffer-backed set of extended attributes.
 * <p>
 * Buffer layout: 4 bytes holding the entry count, followed by the entries
 * sorted by (key, uid). Each entry consists of three length-prefixed strings
 * in the order key, uid, value.
 */
public class BufferBackedXAttrs extends BufferBackedMetadata implements XAttrs {

    /** A single extended attribute: a (key, uid) pair mapped to a value. */
    public static class Entry implements XAttrs.Entry {

        private String key;

        private String value;

        private String uid;

        public Entry(String key, String uid, String value) {
            this.key = key;
            this.value = value;
            this.uid = uid;
        }

        public String getKey() {
            return key;
        }

        public String getUID() {
            return uid;
        }

        public String getValue() {
            return value;
        }

        public String toString() {
            return "(" + key + ", " + value + ", " + uid + ")";
        }

    }

    /**
     * Creates an xattrs object on top of an existing buffer.
     *
     * @param buffer the backing buffer
     * @param copy whether to work on a copy of the given buffer
     * @param freeOnDestroy whether the backing buffer is freed on destroy()
     */
    public BufferBackedXAttrs(ReusableBuffer buffer, boolean copy, boolean freeOnDestroy) {
        super(buffer, copy, freeOnDestroy);
    }

    /**
     * Creates a new xattrs object from parallel arrays of keys, values and
     * user IDs and serializes it into a newly allocated buffer.
     */
    public BufferBackedXAttrs(String[] keys, String[] values, String[] userIDs) {

        super(null, false, true);

        assert (keys.length == values.length && keys.length == userIDs.length);

        // create a sorted list of entries
        // FIX: restored the generic type parameters (stripped in the original);
        // without them, the anonymous comparator's compare(Entry, Entry) does
        // not override the raw Comparator's compare(Object, Object) and the
        // class does not compile
        List<Entry> list = new ArrayList<Entry>(keys.length);
        for (int i = 0; i < keys.length; i++)
            list.add(new Entry(keys[i], userIDs[i], values[i]));
        Collections.sort(list, new Comparator<Entry>() {
            public int compare(Entry o1, Entry o2) {
                int tmp = o1.key.compareTo(o2.key);
                return tmp == 0 ? o1.uid.compareTo(o2.uid) : tmp;
            }
        });

        // determine required buffer size
        // NOTE(review): this uses String.length() (char count) while other
        // constructors in this package use getBytes().length — equivalent only
        // for ASCII content; confirm the attribute strings are ASCII
        int bufSize = 4; // number of entries stored in first 4 bytes
        for (int i = 0; i < keys.length; i++)
            // length + 4 len bytes for each part
            bufSize += keys[i].length() + userIDs[i].length() + values[i].length() + 12;

        // allocate a new buffer
        buffer = BufferPool.allocate(bufSize);
        buffer.putInt(keys.length);

        // fill the buffer with the sorted list
        for (Entry entry : list) {
            buffer.putString(entry.key);
            buffer.putString(entry.uid);
            buffer.putString(entry.value);
        }
    }

    /** Returns the number of stored attributes. */
    public int getEntryCount() {
        buffer.position(0);
        return buffer.getInt();
    }

    /**
     * Returns an iterator over all entries in their stored (sorted) order.
     * The iterator does not support removal.
     */
    public Iterator iterator() {

        return new Iterator() {

            private int count = 0;

            private int index = 4;

            public boolean hasNext() {
                return count < getEntryCount();
            }

            public Entry next() {

                buffer.position(index);
                Entry entry = new Entry(buffer.getString(), buffer.getString(), buffer.getString());
                index = buffer.position();
                count++;

                return entry;
            }

            public void remove() {
                throw new UnsupportedOperationException("remove not implemented");
            }
        };
    }

    /**
     * Returns the value stored for the given (key, uid) pair, or
     * null if no such entry exists.
     */
    public ASCIIString getValue(String key, String uid) {

        int index = getIndexPosition(key, uid, false);
        if (index == -1)
            return null;

        // skip key and uid
        for (int i = 0; i < 2; i++) {
            buffer.position(index);
            index = index + buffer.getInt() + 4;
        }

        buffer.position(index);
        return buffer.getBufferBackedASCIIString();
    }

    /**
     * Removes the entry for the given (key, uid) pair, if it exists, and
     * decrements the entry count.
     */
    public void deleteEntry(String key, String uid) {

        int index = getIndexPosition(key, uid, false);
        if (index == -1)
            return;

        // determine the entry size
        buffer.position(index);
        final int count = getEntrySize(index);

        // delete the entry
        delete(index, count);

        // update the entry count
        int entryCount = getEntryCount();
        buffer.position(0);
        buffer.putInt(entryCount - 1);

    }

    /**
     * Sets the value for the given (key, uid) pair, replacing a previously
     * stored entry if necessary.
     */
    public void editEntry(String key, String value, String uid) {

        // first, delete the former entry if necessary
        deleteEntry(key, uid);

        // create a buffer containing the new entry and insert it

        // determine the size for the entry buffer
        final int size = key.length() + value.length() + uid.length() + 12;

        // create and fill the buffer
        ReusableBuffer tmp = BufferPool.allocate(size);
        tmp.putString(key);
        tmp.putString(uid);
        tmp.putString(value);

        // insert the buffer at the sort position
        final int index = getIndexPosition(key, uid, true);
        insert(index, tmp);

        // update the entry count
        int entryCount = getEntryCount();
        buffer.position(0);
        buffer.putInt(entryCount + 1);
    }

    /**
     * Computes the total byte size of the entry starting at the given offset
     * (three length-prefixed strings).
     */
    private int getEntrySize(int index) {

        int size = 0;

        for (int i = 0; i < 3; i++) {
            buffer.position(index + size);
            int len = buffer.getInt();
            assert (len >= 0);
            size += len + 4;
        }

        return size;
    }

    /**
     * Finds the buffer offset of the entry with the given (key, uid) pair.
     * Entries are sorted, so the scan stops at the first entry that compares
     * greater.
     *
     * @param insert if true, returns the offset at which a new
     *            entry would have to be inserted when no match exists; if
     *            false, returns -1 in that case
     */
    private int getIndexPosition(String key, String uid, boolean insert) {

        int index = 4;
        for (;;) {

            assert (index <= buffer.limit());

            if (index == buffer.limit())
                return insert ? index : -1;

            buffer.position(index);

            String k = buffer.getBufferBackedASCIIString().toString();
            int cmp = k.compareTo(key);
            if (cmp == 0) {
                String u = buffer.getBufferBackedASCIIString().toString();
                cmp = u.compareTo(uid);
            }

            if (cmp < 0)
                index += getEntrySize(index);
            else if (cmp == 0)
                return index;
            else
                return insert ? index : -1;
        }
    }

}
+ The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.brain.metadata; + +import org.xtreemfs.common.buffer.ASCIIString; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; + +public class BufferBackedXLoc extends BufferBackedMetadata implements XLoc { + + private static final int SP_DYN_INDEX = 0; + + private static final int OSDS_DYN_INDEX = 1; + + private ASCIIString[] osdCache; + + private StripingPolicy stripingPolicy; + + public BufferBackedXLoc(ReusableBuffer buffer, boolean copy, boolean freeOnDestroy) { + + super(buffer, copy, freeOnDestroy); + + buffer.position(getBufferIndex(OSDS_DYN_INDEX)); + osdCache = new ASCIIString[buffer.getShort()]; + } + + public BufferBackedXLoc(BufferBackedStripingPolicy stripingPolicy, String[] osds) { + + super(null, false, true); + + assert (osds.length <= Short.MAX_VALUE); + + // determine required buffer size + osdCache = new ASCIIString[osds.length]; + + int osdListSize = 2; // 2 bytes for # OSDs + for (String osd : osds) + // size + 4 length bytes + osdListSize += osd.getBytes().length + 4; + + int spolSize = stripingPolicy.size(); + assert (spolSize <= Short.MAX_VALUE); + + // + 2 len bytes for sp + int bufSize = 2 + spolSize + 
osdListSize; + + // allocate a new buffer and fill it with the given data + buffer = BufferPool.allocate(bufSize); + buffer.putShort((short) spolSize); + stripingPolicy.getBuffer().position(0); + buffer.put(stripingPolicy.getBuffer()); + + buffer.putShort((short) osds.length); + for (String osd : osds) + buffer.putString(osd); + + } + + public void destroy() { + + if (stripingPolicy != null) + stripingPolicy.destroy(); + + super.destroy(); + } + + public short getOSDCount() { + buffer.position(getBufferIndex(OSDS_DYN_INDEX)); + return buffer.getShort(); + } + + public ASCIIString getOSD(int osdIndex) { + + if (osdCache[osdIndex] == null) { + + // find the correct index position in the buffer + int index = getOSDBufferIndex(osdIndex); + buffer.position(index); + + // total length = string length + # length bytes + int len = buffer.getInt() + 4; + assert (len >= 0); + + // create the string from a view buffer + ReusableBuffer buf = buffer.createViewBuffer(); + buf.range(index, len); + buf.position(0); + osdCache[osdIndex] = buf.getBufferBackedASCIIString(); + BufferPool.free(buf); + } + + return osdCache[osdIndex]; + } + + public StripingPolicy getStripingPolicy() { + + if (stripingPolicy == null) { + + // find the correct index position in the buffer + int index = getBufferIndex(SP_DYN_INDEX); + buffer.position(index); + + short len = buffer.getShort(); + assert (len >= 0); + + if (len == 0) + return null; + + // create the target object from a view buffer (skip the len bytes) + ReusableBuffer buf = buffer.createViewBuffer(); + buf.range(index + 2, len); + buf.position(0); + stripingPolicy = new BufferBackedStripingPolicy(buf, false, true); + } + + return stripingPolicy; + + } + + private int getBufferIndex(int entityIndex) { + + switch (entityIndex) { + + case SP_DYN_INDEX: + return 0; + + case OSDS_DYN_INDEX: + buffer.position(0); + short len = buffer.getShort(); + assert (len > 0); + return len + 2; + + default: + return -1; + } + } + + private int 
getOSDBufferIndex(int osdPosition) { + + // calculate the index; skip the first 2 bytes (# OSDs) + int index = getBufferIndex(OSDS_DYN_INDEX) + 2; + for (int i = 0; i < osdPosition; i++) { + buffer.position(index); + int len = buffer.getInt(); + assert (len > 0); + + index += len + 4; + } + + return index; + } + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedXLocList.java b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedXLocList.java new file mode 100644 index 0000000000000000000000000000000000000000..afadc50be61c8b845da79a5019d7c9e63019e026 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/metadata/BufferBackedXLocList.java @@ -0,0 +1,222 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.brain.metadata; + +import java.util.Iterator; + +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; + +public class BufferBackedXLocList extends BufferBackedMetadata implements XLocList { + + private static final int VERSION_INDEX = 0; + + private static final int REPLICA_LIST_INDEX = 4; + + private XLoc[] replicaCache; + + public BufferBackedXLocList(ReusableBuffer buffer, boolean copy, boolean freeOnDestroy) { + + super(buffer, copy, freeOnDestroy); + + buffer.position(REPLICA_LIST_INDEX); + replicaCache = new XLoc[buffer.getInt()]; + } + + public BufferBackedXLocList(BufferBackedXLoc[] replicas, int version) { + + super(null, false, true); + + // determine required buffer size + replicaCache = new XLoc[replicas.length]; + int bufSize = 8; // 1st & 2nd 4 bytes: version + # replicas + for (BufferBackedXLoc repl : replicas) + // size + 4 length bytes + bufSize += repl.size() + 4; + + // allocate a new buffer and fill it with the given data + buffer = BufferPool.allocate(bufSize); + buffer.putInt(version); + buffer.putInt(replicas.length); + for (BufferBackedXLoc replica : replicas) { + buffer.putInt(replica.size()); + replica.getBuffer().position(0); + buffer.put(replica.getBuffer()); + } + + } + + public void destroy() { + + for (XLoc replica : replicaCache) + if (replica != null) + replica.destroy(); + + super.destroy(); + } + + public int getReplicaCount() { + buffer.position(REPLICA_LIST_INDEX); + return buffer.getInt(); + } + + public XLoc getReplica(int replicaIndex) { + + if (replicaCache[replicaIndex] == null) { + + // find the correct index position in the buffer + int index = getBufferIndex(replicaIndex); + buffer.position(index); + + int len = buffer.getInt(); + assert (len >= 0); + + if (len == 0) + return null; + + // create the target object from a view buffer (skip the len bytes) + ReusableBuffer buf = buffer.createViewBuffer(); + 
buf.range(index + 4, len); + buf.position(0); + replicaCache[replicaIndex] = new BufferBackedXLoc(buf, false, true); + } + + return replicaCache[replicaIndex]; + } + + public int getVersion() { + buffer.position(VERSION_INDEX); + return buffer.getInt(); + } + + public Iterator iterator() { + + return new Iterator() { + + private int index = 0; + + public boolean hasNext() { + return index < getReplicaCount(); + } + + public XLoc next() { + return getReplica(index++); + } + + public void remove() { + throw new UnsupportedOperationException("remove not implemented"); + } + }; + } + + public void addReplica(XLoc replica, boolean incVersion) { + + assert (replica instanceof BufferBackedXLoc); + BufferBackedXLoc repl = (BufferBackedXLoc) replica; + + // we assume that the buffer's limit marks the end of the last replica + int offset = buffer.limit(); + + // allocate a temporary buffer containing the replica's buffer + 4 bytes + // for the size + ReusableBuffer tmp = BufferPool.allocate(repl.getBuffer().limit() + 4); + tmp.putInt(repl.getBuffer().limit()); + repl.getBuffer().position(0); + tmp.put(repl.getBuffer()); + + // insert the temp buffer at the end offset + insert(offset, tmp); + BufferPool.free(tmp); + + // if version incrementation is necessary, increment the version number + // of the list + if (incVersion) { + int oldVer = getVersion(); + buffer.position(VERSION_INDEX); + buffer.putInt(oldVer + 1); + } + + // increment the replica count + int count = getReplicaCount() + 1; + buffer.position(REPLICA_LIST_INDEX); + buffer.putInt(count); + + // empty the replica cache + for (XLoc r : replicaCache) + if (r != null) + r.destroy(); + replicaCache = new XLoc[count]; + + } + + public void removeReplica(int replicaIndex, boolean incVersion) { + + // determine buffer index and size of the replica to delete + int index = getBufferIndex(replicaIndex); + int size = getReplicaSize(index); + + // delete the replica + delete(index, size + 4); + + // if version incrementation 
is necessary, increment the version number + // of the list + if (incVersion) { + int oldVer = getVersion(); + buffer.position(VERSION_INDEX); + buffer.putInt(oldVer + 1); + } + + // decrement the replica count + int count = getReplicaCount() - 1; + buffer.position(REPLICA_LIST_INDEX); + buffer.putInt(count); + + // empty the replica cache + for (XLoc r : replicaCache) + if (r != null) + r.destroy(); + replicaCache = new XLoc[count]; + } + + private int getBufferIndex(int replicaIndex) { + + int index = 8; + for (int i = 0; i < replicaIndex; i++) { + buffer.position(index); + int len = buffer.getInt(); + assert (len > 0); + + index += len + 4; + } + + return index; + } + + private int getReplicaSize(int index) { + buffer.position(index); + return buffer.getInt(); + } + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/FSObject.java b/servers/src/org/xtreemfs/mrc/brain/metadata/FSObject.java new file mode 100644 index 0000000000000000000000000000000000000000..8ee755db96d734afb11faa1924149e35f97c9d3d --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/metadata/FSObject.java @@ -0,0 +1,74 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+
+  You should have received a copy of the GNU General Public License
+  along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.metadata;
+
+import org.xtreemfs.common.buffer.ASCIIString;
+
+/**
+ * Interface for accessing file system objects, i.e. files and directories.
+ */
+public interface FSObject extends Metadata {
+
+    /** Returns the object's unique ID. */
+    public long getId();
+
+    /** Assigns a new unique ID to this object. */
+    public void setId(long id);
+
+    // NOTE(review): time stamps below are int-valued; the unit (seconds vs.
+    // milliseconds) is not visible from this interface — confirm against callers.
+
+    /** Returns the access time stamp. */
+    public int getAtime();
+
+    /** Sets the access time stamp. */
+    public void setAtime(int atime);
+
+    /** Returns the change time stamp. */
+    public int getCtime();
+
+    /** Sets the change time stamp. */
+    public void setCtime(int ctime);
+
+    /** Returns the modification time stamp. */
+    public int getMtime();
+
+    /** Sets the modification time stamp. */
+    public void setMtime(int mtime);
+
+    /** Returns the ID of the user owning this object. */
+    public ASCIIString getOwnerId();
+
+    /** Sets the ID of the user owning this object. */
+    public void setOwnerId(String ownerId);
+
+    /** Returns the ID of the group owning this object. */
+    public ASCIIString getOwningGroupId();
+
+    /** Sets the ID of the group owning this object. */
+    public void setOwningGroupId(String groupId);
+
+    /** Returns the link target, if this object represents a link. */
+    public ASCIIString getLinkTarget();
+
+    /** Sets the link target. */
+    public void setLinkTarget(String linkTarget);
+
+    /** Returns the striping policy assigned to this object. */
+    public StripingPolicy getStripingPolicy();
+
+    /** Assigns a striping policy to this object. */
+    public void setStripingPolicy(StripingPolicy sp);
+
+    /** Returns the access control list assigned to this object. */
+    public ACL getAcl();
+
+    /** Assigns an access control list to this object. */
+    public void setACL(ACL acl);
+
+    /** Returns the set of extended attributes assigned to this object. */
+    public XAttrs getXAttrs();
+
+    /** Assigns a set of extended attributes to this object. */
+    public void setXAttrs(XAttrs xattrs);
+
+}
diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/FileObject.java b/servers/src/org/xtreemfs/mrc/brain/metadata/FileObject.java
new file mode 100644
index 0000000000000000000000000000000000000000..4810efd7aaaf184e31cad935e7866cb1ceb04fa3
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/metadata/FileObject.java
@@ -0,0 +1,56 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+  This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+  Grid Operating System, see for more details.
+  The XtreemOS project has been developed with the financial support of the
+  European Commission's IST program under contract #FP6-033576.
+
+  XtreemFS is free software: you can redistribute it and/or modify it under
+  the terms of the GNU General Public License as published by the Free
+  Software Foundation, either version 2 of the License, or (at your option)
+  any later version.
+
+  XtreemFS is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.metadata;
+
+/**
+ * Interface for accessing files.
+ */
+public interface FileObject extends FSObject {
+
+    /** Returns the file size. */
+    // NOTE(review): presumably in bytes — confirm against callers.
+    public long getSize();
+
+    /** Sets the file size. */
+    public void setSize(long size);
+
+    /** Returns the number of links referencing this file. */
+    public short getLinkCount();
+
+    /** Sets the number of links referencing this file. */
+    public void setLinkCount(short linkCount);
+
+    /** Returns the file's current epoch number. */
+    // NOTE(review): looks like a truncate-epoch counter — confirm semantics.
+    public int getEpoch();
+
+    /** Sets the file's current epoch number. */
+    public void setEpoch(int epoch);
+
+    /** Returns the most recently issued epoch number. */
+    public int getIssuedEpoch();
+
+    /** Sets the most recently issued epoch number. */
+    public void setIssuedEpoch(int issuedEpoch);
+
+    /** Checks whether the file is marked as read-only. */
+    public boolean isReadOnly();
+
+    /** Marks the file as read-only or read-write. */
+    public void setReadOnly(boolean readOnly);
+
+    /** Returns the file's X-Locations List, describing its replica locations. */
+    public XLocList getXLocList();
+
+    /** Assigns an X-Locations List to the file. */
+    public void setXLocList(XLocList xLocList);
+
+}
diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/Metadata.java b/servers/src/org/xtreemfs/mrc/brain/metadata/Metadata.java
new file mode 100644
index 0000000000000000000000000000000000000000..2f5bd370a478f93dc3089b991b460fa16dc69473
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/metadata/Metadata.java
@@ -0,0 +1,37 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+  This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+  Grid Operating System, see for more details.
+  The XtreemOS project has been developed with the financial support of the
+  European Commission's IST program under contract #FP6-033576.
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.brain.metadata; + +/** + * The base interface for any kind of extended metadata. + */ +public interface Metadata { + + /** + * Destroys a metadata object and frees any internally allocated resources. + */ + public void destroy(); + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/StripingPolicy.java b/servers/src/org/xtreemfs/mrc/brain/metadata/StripingPolicy.java new file mode 100644 index 0000000000000000000000000000000000000000..27dad3a23a3383719436e2bf8c802d393cbaad96 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/metadata/StripingPolicy.java @@ -0,0 +1,81 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.brain.metadata; + +import org.xtreemfs.common.buffer.ASCIIString; + +/** + * Interface for accessing a striping policy. Striping policies may either be + * part of a file's X-Locations List, or default striping policies assigned for + * directories, which will be assigned to newly created files. + */ +public interface StripingPolicy extends Metadata { + + /** + * Returns the striping pattern. + * + * @return the striping pattern + */ + public ASCIIString getPattern(); + + /** + * Changes the striping pattern. + * + * @param pattern + * the new striping pattern + */ + public void setPattern(String pattern); + + /** + * Returns the striping width, i.e. number of OSDs used for the pattern. + * + * @return the striping width + */ + public int getWidth(); + + /** + * Sets the striping width to a given number. + * + * @param width + * the striping width + */ + public void setWidth(int width); + + /** + * Returns the stripe size, i.e. size of a single object in kBytes. + * + * @return the stripe size + */ + public int getStripeSize(); + + /** + * Sets the stripe size. 
+ * + * @param stripeSize + * the new stripe size + */ + public void setStripeSize(int stripeSize); + +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/XAttrs.java b/servers/src/org/xtreemfs/mrc/brain/metadata/XAttrs.java new file mode 100644 index 0000000000000000000000000000000000000000..5d4e1cfe106780c43fddbf77564d7c8ccb3d24ae --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/metadata/XAttrs.java @@ -0,0 +1,96 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.brain.metadata; + +import java.util.Iterator; + +import org.xtreemfs.common.buffer.ASCIIString; + +/** + * Interface for accessing a set of extended attributes. A set of extended + * attributes may be assigned to a file or directory. + */ +public interface XAttrs extends Metadata { + + /** + * Represents a single extended attribute. + */ + public static interface Entry { + + public String getKey(); + + public String getValue(); + + public String getUID(); + } + + /** + * Returns an iterator for all entries. 
+ * + * @return an iterator + */ + public Iterator iterator(); + + /** + * Returns the number of stored xattrs. + * + * @return the number of xattrs + */ + public int getEntryCount(); + + /** + * Returns an extended attribute from a given user with a given key. + * + * @param key + * the key (name) of the attribute + * @param uid + * the uid of the user + * @return the value of the attribute, or an empty string if no such + * attribute exists + */ + public ASCIIString getValue(String key, String uid); + + /** + * Modifies an entry. If the key + uid combination does not exist, a new + * entry will be created. + * + * @param key + * the key + * @param value + * the value + * @param uid + * the uid + */ + public void editEntry(String key, String value, String uid); + + /** + * Deletes an existing entry. Does nothing if the entity does not exist. + * + * @param key + * the key of the entry to delete + */ + public void deleteEntry(String key, String uid); + +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/XLoc.java b/servers/src/org/xtreemfs/mrc/brain/metadata/XLoc.java new file mode 100644 index 0000000000000000000000000000000000000000..700290c3d7fdcb10c87c90c97d5d28013640588a --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/metadata/XLoc.java @@ -0,0 +1,57 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+
+  XtreemFS is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.metadata;
+
+import org.xtreemfs.common.buffer.ASCIIString;
+
+/**
+ * Interface for accessing information about a single replica (X-Location) of a
+ * file. X-Locations are stored in a file's X-Locations List and contain
+ * information about file data storage locations.
+ */
+public interface XLoc extends Metadata {
+
+    /**
+     * The number of OSDs in the X-Location.
+     *
+     * @return the number of OSDs
+     */
+    public short getOSDCount();
+
+    /**
+     * Returns the OSD UUID at the given index position.
+     *
+     * @param index the index of the OSD UUID
+     * @return the OSD UUID at the given index position
+     */
+    public ASCIIString getOSD(int index);
+
+    /**
+     * Returns the striping policy assigned to the X-Location.
+     *
+     * @return the striping policy
+     */
+    public StripingPolicy getStripingPolicy();
+}
\ No newline at end of file
diff --git a/servers/src/org/xtreemfs/mrc/brain/metadata/XLocList.java b/servers/src/org/xtreemfs/mrc/brain/metadata/XLocList.java
new file mode 100644
index 0000000000000000000000000000000000000000..26985cb76561f1fc5ef06009a881fc030b6fd7e1
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/metadata/XLocList.java
@@ -0,0 +1,86 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+  This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+  Grid Operating System, see for more details.
+  The XtreemOS project has been developed with the financial support of the
+  European Commission's IST program under contract #FP6-033576.
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.brain.metadata; + +import java.util.Iterator; + +/** + * Interface for accessing an X-Locations List. X-Locations Lists contain + * information about storage locations (OSDs) used for different replicas of a + * file. + */ +public interface XLocList extends Metadata { + + /** + * Returns the number of replicas currently stored in the X-Locations List. + * + * @return the number of replicas + */ + public int getReplicaCount(); + + /** + * Returns the replica at the given index position. + * + * @param index + * position for the replica to return + * @return the replica at position index + */ + public XLoc getReplica(int index); + + /** + * Returns the version number associated with the X-Locations List. + * + * @return the version number + */ + public int getVersion(); + + /** + * Returns an iterator for all replicas. + * + * @return an iterator + */ + public Iterator iterator(); + + /** + * Appends a new replica at the end of the X-Locations List. + * + * @param replica + * the replica to append + * @param incVersion + * defines whether or not to increment the list's version number + */ + public void addReplica(XLoc replica, boolean incVersion); + + /** + * Deletes a replica at the given index position in the X-Locations List. 
+ * + * @param replicaIndex + * the index position at which to delete the replica + * @param incVersion + * defines whether or not to increment the list's version number + */ + public void removeReplica(int replicaIndex, boolean incVersion); + +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/BackendException.java b/servers/src/org/xtreemfs/mrc/brain/storage/BackendException.java new file mode 100644 index 0000000000000000000000000000000000000000..465c642f4fccf589623e7bd92ebceeac7f46443d --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/storage/BackendException.java @@ -0,0 +1,48 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage;
+
+/**
+ * Exception type used by the MRC storage backend, typically to wrap errors
+ * raised by the underlying database layer.
+ */
+public class BackendException extends Exception {
+
+    /** Creates an exception without a detail message or cause. */
+    public BackendException() {
+    }
+
+    /** Creates an exception with the given detail message. */
+    public BackendException(String message) {
+        super(message);
+    }
+
+    /** Creates an exception wrapping the given cause. */
+    public BackendException(Throwable cause) {
+        super(cause);
+    }
+
+    /** Creates an exception with the given detail message and cause. */
+    public BackendException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/DBStorageBackend.java.old b/servers/src/org/xtreemfs/mrc/brain/storage/DBStorageBackend.java.old
new file mode 100644
index 0000000000000000000000000000000000000000..1a1cab3880d511bbcc947d1e62386ec6a62b1209
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/DBStorageBackend.java.old
@@ -0,0 +1,379 @@
+package org.xtreemfs.mrc.brain.storage;
+
+import java.io.File;
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.xtreemfs.mrc.brain.storage.entities.AbstractFileEntity;
+import org.xtreemfs.mrc.brain.storage.entities.DirEntity;
+import org.xtreemfs.mrc.brain.storage.entities.FileAttributeEntity;
+import org.xtreemfs.mrc.brain.storage.entities.FileEntity;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+
+/**
+ * The storage backend used to store files and directories.
+ * + * @author stender + * + */ +public class DBStorageBackend implements StorageBackend { + + private Environment env; + + private EntityStore store; + + private File dbDir; + + private PrimaryIndex fileIdIndex; + + private PrimaryIndex dirIdIndex; + + private SecondaryIndex fileIndex; + + private SecondaryIndex dirIndex; + + private SecondaryIndex fileParentIndex; + + private SecondaryIndex dirParentIndex; + + private PrimaryIndex attrIndex; + + public DBStorageBackend(String dbDirectory, String volumeOwnerId, + String volumeGroupId) throws BackendException { + + try { + + EnvironmentConfig envCfg = new EnvironmentConfig(); + StoreConfig storeCfg = new StoreConfig(); + + envCfg.setReadOnly(false); + storeCfg.setReadOnly(false); + + envCfg.setAllowCreate(true); + storeCfg.setAllowCreate(true); + + envCfg.setTransactional(false); + storeCfg.setTransactional(false); + + storeCfg.setDeferredWrite(true); + + dbDir = new File(dbDirectory); + dbDir.mkdir(); + + envCfg.setLocking(false); + envCfg.setCachePercent(90); + envCfg.setTxnNoSync(true); + // envCfg.setConfigParam("je.env.recovery", "false"); + // envCfg.setConfigParam("je.env.isTransactional", "false"); + // envCfg.setConfigParam("je.log.memOnly", "true"); + envCfg.setConfigParam("je.env.runCheckpointer", "false"); + envCfg.setConfigParam("je.env.runCleaner", "false"); + envCfg.setConfigParam("je.env.checkLeaks", "false"); + envCfg.setConfigParam("je.log.useNIO", "true"); + envCfg.setConfigParam("je.log.directNIO", "false"); + envCfg.setConfigParam("je.env.runINCompressor", "false"); + envCfg.setConfigParam("je.deferredWrite.temp", "true"); + + env = new Environment(dbDir, envCfg); + env.sync(); + + store = new EntityStore(env, "EntityStore", storeCfg); + store.getSequenceConfig("Sequence_File").setCacheSize(0); + // store.getSequenceConfig("Sequence_File").setExclusiveCreate(true); + + fileIdIndex = store.getPrimaryIndex(Long.class, FileEntity.class); + dirIdIndex = store.getPrimaryIndex(Long.class, 
DirEntity.class); + + fileIndex = store.getSecondaryIndex(fileIdIndex, String.class, + "indexId"); + dirIndex = store.getSecondaryIndex(dirIdIndex, String.class, + "indexId"); + + fileParentIndex = store.getSecondaryIndex(fileIdIndex, Long.class, + "parentId"); + dirParentIndex = store.getSecondaryIndex(dirIdIndex, Long.class, + "parentId"); + + attrIndex = store.getPrimaryIndex(String.class, + FileAttributeEntity.class); + + // if the root element does not exist, create it + if (dirIdIndex.count() == 0) { + long time = System.currentTimeMillis() / 1000; + dirIdIndex.put(new DirEntity(0, "/", -1, volumeOwnerId, + volumeGroupId, time, time, time, null)); + } + + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public AbstractFileEntity getFileById(long fileId) throws BackendException { + + try { + + AbstractFileEntity data = dirIdIndex.get(fileId); + if (data == null) + data = fileIdIndex.get(fileId); + + return data; + + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public List getAttrsByFileId(long fileId, long attrType) + throws BackendException { + + try { + + List result = new LinkedList(); + + EntityCursor csr = attrIndex.entities(); + for (FileAttributeEntity data : csr) + if (data.getFileId() == fileId && data.getType() == attrType) + result.add(data); + + csr.close(); + + return result; + + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public List getAttrsByFileId(long fileId) + throws BackendException { + + try { + + List result = new LinkedList(); + + EntityCursor csr = attrIndex.entities(); + for (FileAttributeEntity data : csr) + if (data.getFileId() == fileId) + result.add(data); + + csr.close(); + + return result; + + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public String getUserAttrByFileId(long fileId, String attr) + throws BackendException { + + try { + + EntityCursor csr = attrIndex.entities(); + for 
(FileAttributeEntity data : csr) + if (data.getFileId() == fileId + && data.getType() == FileAttributeEntity.TYPE_USER + && data.getKey().equals(attr)) { + csr.close(); + return (String) data.getValue(); + } + + csr.close(); + return null; + + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public Object getSystemAttrByFileId(long fileId, String attr) + throws BackendException { + + try { + + EntityCursor csr = attrIndex.entities(); + for (FileAttributeEntity data : csr) + if (data.getFileId() == fileId + && data.getType() == FileAttributeEntity.TYPE_SYSTEM + && data.getKey().equals(attr)) { + csr.close(); + return data.getValue(); + } + + csr.close(); + return null; + + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public Collection getFilesByParent(long parentId) + throws BackendException { + + try { + return fileParentIndex.subIndex(parentId).map().values(); + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public Collection getDirsByParent(long parentId) + throws BackendException { + + try { + return dirParentIndex.subIndex(parentId).map().values(); + + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public void deleteFile(long fileId) throws BackendException { + + try { + + // delete the file + if (!fileIdIndex.delete(fileId)) + dirIdIndex.delete(fileId); + + // delete all related attributes + EntityCursor csr = attrIndex.entities(); + for (FileAttributeEntity data : csr) { + if (data.getFileId() == fileId) + csr.delete(); + } + csr.close(); + + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public AbstractFileEntity getChild(String name, long parentId) + throws BackendException { + + try { + + AbstractFileEntity file = dirIndex.get(createId(parentId, name)); + if (file == null) + file = fileIndex.get(createId(parentId, name)); + + return file; + + } catch (DatabaseException exc) { + throw new 
BackendException(exc); + } + } + + public void deleteAttribute(long fileId, String key) + throws BackendException { + try { + attrIndex.delete(fileId + ":" + key); + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public void put(FileAttributeEntity data) throws BackendException { + try { + attrIndex.put(data); + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public void put(FileEntity data) throws BackendException { + try { + fileIdIndex.put(data); + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public void put(DirEntity data) throws BackendException { + try { + dirIdIndex.put(data); + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public void sync() throws BackendException { + try { + env.sync(); + store.sync(); + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public void close() throws BackendException { + + try { + + if (store != null) + store.close(); + + if (env != null) + env.close(); + + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + public void destroy() { + + if (store != null) + try { + store.close(); + } catch (DatabaseException dbe) { + } + + if (env != null) + try { + env.close(); + } catch (DatabaseException dbe) { + } + + FSTools.delTree(dbDir); + } + + public void dumpDB() throws BackendException { + + try { + + System.out.println("files:"); + EntityCursor csr = fileIndex.entities(); + for (AbstractFileEntity data : csr) + System.out.println(data); + csr.close(); + + System.out.println("\ndirectories:"); + EntityCursor csr2 = dirIndex.entities(); + for (AbstractFileEntity data : csr2) + System.out.println(data); + csr2.close(); + + } catch (DatabaseException exc) { + throw new BackendException(exc); + } + } + + private static String createId(long parentId, String name) { + return name + "#" + parentId; + } + +} diff --git 
a/servers/src/org/xtreemfs/mrc/brain/storage/DiskLogger.java b/servers/src/org/xtreemfs/mrc/brain/storage/DiskLogger.java new file mode 100644 index 0000000000000000000000000000000000000000..785a01cc4499206f8e024b109a7ac91a3b1f2a00 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/storage/DiskLogger.java @@ -0,0 +1,280 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.brain.storage; + +import java.io.File; +import java.io.FileDescriptor; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.SyncFailedException; +import java.nio.BufferUnderflowException; +import java.nio.channels.FileChannel; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.concurrent.LinkedBlockingQueue; + +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.LifeCycleThread; + +/** + * Writes entries to the on disc append log and syncs after blocks of MAX_ENTRIES_PER_BLOCK. + * @author bjko + */ +public class DiskLogger extends LifeCycleThread { + + public static final byte OTYPE_REPLICATION = 1; + + public static final byte OPTYPE_BRAIN = 2; + + public static final byte OPTYPE_MRC = 3; + + /** + * NIO FileChannel used to write ByteBuffers directly to file. + */ + private FileChannel channel; + /** + * Stream used to obtain the FileChannel and to flush file. + */ + private FileOutputStream fos; + /** + * Used to sync file. + */ + private FileDescriptor fdes; + + /** + * The LogEntries to be written to disk. + */ + private LinkedBlockingQueue entries; + + /** + * If set to true the thread will shutdown. + */ + boolean quit; + + /** + * If set to true the DiskLogger is down + */ + boolean down; + + /** + * LogFile name + */ + private String logfile; + + + /** + * if set to true, no fsync is executed after disk writes. DANGEROUS. + */ + private final boolean noFsync; + + /** + * Max number of LogEntries to write before sync. + */ + public static final int MAX_ENTRIES_PER_BLOCK = 100; + + + /** + * Creates a new instance of DiskLogger + * @param logfile Name and path of file to use for append log. 
+ * @throws java.io.FileNotFoundException If that file cannot be created. + * @throws java.io.IOException If that file cannot be created. + */ + public DiskLogger(String logfile, boolean noFsync) throws FileNotFoundException, IOException { + + super("DiskLogger thr."); + + if (logfile == null) + throw new RuntimeException("expected a non-null logfile name!"); + File lf = new File(logfile); + if(!lf.getParentFile().exists() && !lf.getParentFile().mkdirs()) + throw new IOException("could not create parent directory for database log file"); + + fos = new FileOutputStream(lf,true); + channel = fos.getChannel(); + fdes = fos.getFD(); + entries = new LinkedBlockingQueue(); + quit = false; + this.logfile = logfile; + this.down = false; + this.noFsync = noFsync; + } + + public long getLogFileSize() { + return new File(logfile).length(); + } + + /** + * Appends an entry to the write queue. + * @param entry entry to write + */ + public void append(LogEntry entry) { + assert(entry != null); + entries.add(entry); + } + + /** + * Main loop. 
+ */ + public void run() { + + ArrayList tmpE = new ArrayList(MAX_ENTRIES_PER_BLOCK); + Logging.logMessage(Logging.LEVEL_DEBUG,this,"operational"); + notifyStarted(); + + down = false; + while (!quit) { + try { + //wait for an entry + tmpE.add( entries.take() ); + if (entries.size() > 0) { + while (tmpE.size() < MAX_ENTRIES_PER_BLOCK-1) { + LogEntry tmp = entries.poll(); + if (tmp == null) + break; + tmpE.add(tmp); + } + } + for (LogEntry le : tmpE) { + assert (le != null) : "Entry must not be null"; + ReusableBuffer buf = le.marshall(); + channel.write(buf.getBuffer()); + BufferPool.free(buf); + } + fos.flush(); + if (!noFsync) + fdes.sync(); + for (LogEntry le : tmpE) { + le.listener.synced(le); + } + tmpE.clear(); + } catch (SyncFailedException ex) { + Logging.logMessage(Logging.LEVEL_ERROR,this,ex); + for (LogEntry le : tmpE) { + le.listener.failed(le,ex); + } + tmpE.clear(); + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR,this,ex); + for (LogEntry le : tmpE) { + le.listener.failed(le,ex); + } + tmpE.clear(); + } catch (InterruptedException ex) { + } + } + Logging.logMessage(Logging.LEVEL_DEBUG,this,"shutdown complete"); + down = true; + notifyStopped(); + } + + /** + * stops this thread + */ + + public void shutdown() { + quit = true; + synchronized (this) { + this.interrupt(); + } + } + + /** This operation assumes that the DiskLogger is dead already + */ + public void cleanLog() throws IOException{ + if (!down) { + channel.truncate(0); + } else { + File f = new File(logfile); + f.delete(); + } + } + + public boolean isDown() { + return down; + } + + + /** + * shut down files. 
+ */ + protected void finalize() throws Throwable { + try { + fos.flush(); + fdes.sync(); + fos.close(); + } catch (SyncFailedException ex) { + } catch (IOException ex) { + } finally { + super.finalize(); + } + } + + + public ReusableBuffer getLog(SliceID slice, int sqStart, int sqEnd) throws IOException { + File f = new File(logfile); + LinkedList entries = new LinkedList(); + int size = 0; + FileInputStream fis = new FileInputStream(f); + FileChannel fc = fis.getChannel(); + try { + while (true) { + LogEntry e = new LogEntry(fc); + + if (!e.slID.equals(slice)) + continue; + + if (e.sequenceID < sqStart) + continue; + + if (e.sequenceID > sqEnd) + break; + + entries.add(e); + size += e.binarySize(); + } + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR,this,ex); + } catch (BufferUnderflowException ex) { + //who cares! probably "a half entry" + //ex.printStackTrace(); + } + ReusableBuffer logbuf = BufferPool.allocate(size); + for (LogEntry e : entries) { + e.marshall(logbuf); + } + logbuf.position(0); + Logging.logMessage(Logging.LEVEL_DEBUG,this,"sending..."+logbuf); + return logbuf; + } + + public int getQLength() { + return this.entries.size(); + } + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/InvalidLogEntryException.java b/servers/src/org/xtreemfs/mrc/brain/storage/InvalidLogEntryException.java new file mode 100644 index 0000000000000000000000000000000000000000..909b5d30b1db65133385865239b4dd78816be810 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/storage/InvalidLogEntryException.java @@ -0,0 +1,50 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.brain.storage; + +import java.io.IOException; + +/** + * Thrown if an invalid LogEntry is read + * @author bjko + */ +public class InvalidLogEntryException extends IOException { + + private int leLength; + + /** Creates a new instance of InvalidLogEntryException */ + public InvalidLogEntryException(String message, int length) { + super(message); + this.leLength = length; + } + + /** The length is the size of the entire packet. Can be used + * to find out, if this is the last packet in the log + */ + public int getLength() { + return leLength; + } + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/JavaStorageBackend.java b/servers/src/org/xtreemfs/mrc/brain/storage/JavaStorageBackend.java new file mode 100644 index 0000000000000000000000000000000000000000..730051a0884600291c18585214c60cfbe8e623da --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/storage/JavaStorageBackend.java @@ -0,0 +1,566 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.brain.storage; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutput; +import java.io.ObjectOutputStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.VersionManagement; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.mrc.brain.storage.entities.AbstractFileEntity; +import org.xtreemfs.mrc.brain.storage.entities.DirEntity; +import org.xtreemfs.mrc.brain.storage.entities.FileAttributeEntity; +import org.xtreemfs.mrc.brain.storage.entities.FileEntity; + +public class JavaStorageBackend implements StorageBackend { + + private static final String DB_FILE_PREFIX = "mrcdb"; + + private static final String DB_FILE_NAME = DB_FILE_PREFIX + + "." 
+ + VersionManagement + .getMrcDataVersion(); + + /* file ID (file) -> file */ + private Map fileMap; + + /* file ID (dir) -> dir */ + private Map dirMap; + + /* file ID (dir) -> maps of references to nested files */ + private Map[]> fileChildrenMap; + + /* file ID (dir) -> maps of references to nested directories */ + private Map[]> dirChildrenMap; + + /* file ID -> (attr name -> attr entity) */ + private Map> fileAttributeMap; + + private long nextFileId; + + private DirEntity rootDir; + + private File dbDir; + + public JavaStorageBackend(String dbDirectory) throws BackendException { + + dbDir = new File(dbDirectory); + dbDir.mkdir(); + File dbFile = new File(dbDir, DB_FILE_NAME); + + ObjectInputStream ois = null; + + if (dbFile.exists()) { + + try { + long startTime = 0; + if (Logging.isDebug()) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "loading database " + dbDirectory + + " ..."); + startTime = System.currentTimeMillis(); + } + + ois = new ObjectInputStream(new FileInputStream(dbFile)); + fileMap = (TreeMap) ois.readObject(); + dirMap = (TreeMap) ois.readObject(); + fileChildrenMap = (TreeMap[]>) ois.readObject(); + dirChildrenMap = (TreeMap[]>) ois.readObject(); + fileAttributeMap = (TreeMap>) ois + .readObject(); + nextFileId = ois.readLong(); + rootDir = (DirEntity) ois.readObject(); + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "loading database '" + + dbDirectory + "' finished after " + + (System.currentTimeMillis() - startTime) + " ms"); + + } catch (Exception exc) { + throw new BackendException(exc); + + } finally { + try { + if (ois != null) + ois.close(); + } catch (IOException exc) { + throw new BackendException(exc); + } + } + + } else { + + String[] files = dbDir.list(); + if (files != null) { + for (String file : files) { + if (file.startsWith(DB_FILE_PREFIX + ".")) { + try { + int version = Integer.parseInt(file + .substring(DB_FILE_PREFIX.length() + 1)); + if (version < VersionManagement.getMrcDataVersion()) 
{ + + String msg1 = "outdated version of MRC database detected: " + + version + " (current version: " + + VersionManagement.getMrcDataVersion() + ")"; + String msg2 = "please convert or delete file '" + + new File(dbDir, file).getAbsolutePath() + "' on server"; + + Logging.logMessage(Logging.LEVEL_ERROR, this, msg1); + Logging.logMessage(Logging.LEVEL_ERROR, this, msg2); + throw new BackendException(msg1 + "\n" + msg2); + } + } catch (NumberFormatException exc) { + // ignore + } + } + } + } + + fileMap = new TreeMap(); + dirMap = new TreeMap(); + fileChildrenMap = new TreeMap[]>(); + dirChildrenMap = new TreeMap[]>(); + fileAttributeMap = new TreeMap>(); + nextFileId = 1; + + // create the root element + long time = TimeSync.getGlobalTime() / 1000; + rootDir = new DirEntity(1, null, null, time, time, time, null, 1); + } + } + + public void deleteAttribute(long fileId, String name) throws BackendException { + + Map attrMap = fileAttributeMap.get(fileId); + if (attrMap != null) + attrMap.remove(name); + } + + public long link(long fileId, String fileName, long parentId) throws BackendException { + + AbstractFileEntity abstractFile = fileMap.get(fileId); + if (abstractFile == null) + abstractFile = dirMap.get(fileId); + assert (abstractFile != null); + + // first, increment the link count in the metadata object + abstractFile.setLinkCount(abstractFile.getLinkCount() + 1); + + // then, create a new link in the parent directory + + if (abstractFile instanceof FileEntity) { + + FileEntity file = (FileEntity) abstractFile; + + // retrieve the hashes containing the parent directory children + Map[] childHashes = fileChildrenMap.get(parentId); + + // if no file hashes have yet been created for the directory, create + // one for mapping the file ID and one for mapping the file name to + // the entity + if (childHashes == null) { + childHashes = new HashMap[2]; + childHashes[0] = new HashMap(); + childHashes[1] = new HashMap(); + + fileChildrenMap.put(parentId, childHashes); 
+ } + + Object obj = childHashes[0].get(fileId); + if (obj == null) { + obj = new Object[] { 1, file }; + childHashes[0].put(fileId, obj); + } else { + Object[] objArray = (Object[]) obj; + objArray[0] = (Integer) objArray[0] + 1; + objArray[1] = file; + } + + childHashes[0].put(fileId, obj); + childHashes[1].put(fileName, file); + + } else { + + DirEntity dir = (DirEntity) abstractFile; + + // retrieve the hashes containing the parent directory children + Map[] childHashes = dirChildrenMap.get(parentId); + + // if no directory hashes have yet been created for the directory, + // one for mapping the directory ID and one for mapping the + // directory name to the entity + if (childHashes == null) { + childHashes = new HashMap[2]; + childHashes[0] = new HashMap(); + childHashes[1] = new HashMap(); + + dirChildrenMap.put(parentId, childHashes); + } + + Object obj = childHashes[0].get(fileId); + if (obj == null) { + obj = new Object[] { 1, dir }; + childHashes[0].put(fileId, obj); + } else { + Object[] objArray = (Object[]) obj; + objArray[0] = (Integer) objArray[0] + 1; + objArray[1] = dir; + } + + childHashes[0].put(fileId, obj); + childHashes[1].put(fileName, dir); + } + + return abstractFile.getLinkCount(); + + } + + public long unlink(long fileId, String name, long parentId) throws BackendException { + + AbstractFileEntity abstractFile = fileMap.get(fileId); + if (abstractFile == null) + abstractFile = dirMap.get(fileId); + assert (abstractFile != null); + + // find the link and remove it from the children map + + if (abstractFile instanceof FileEntity) { + + // retrieve the hashes containing the parent directory children + Map[] childHashes = fileChildrenMap.get(parentId); + + // if an entity is linked to a directory more than once, decrement + // the directory link counter in the 'by ID' map; + // otherwise, remove the entry from the 'by ID' map + Object[] entity1 = (Object[]) childHashes[0].get(fileId); + if (((Integer) entity1[0]).equals(1)) + 
childHashes[0].remove(fileId); + else + entity1[0] = (Integer) entity1[0] - 1; + + // remove the entry from the 'by name' map + FileEntity entity2 = (FileEntity) childHashes[1].remove(name); + assert (entity1 != null) : "file not found in directory table"; + assert (entity1[1] == entity2) : "inconsistent directory entry"; + + // remove the file children map entry if child map is empty + if (childHashes[0].size() == 0) + fileChildrenMap.remove(parentId); + + // check whether the metadata object can be deleted, as no more + // links will exist after the unlink operation + if (abstractFile.getLinkCount() == 1) + fileMap.remove(fileId); + + } else { + + // retrieve the hashes containing the parent directory children + Map[] childHashes = dirChildrenMap.get(parentId); + + // if an entity is linked to a directory more than once, decrement + // the directory link counter in the 'by ID' map; + // otherwise, remove the entry from the 'by ID' map + Object[] entity1 = (Object[]) childHashes[0].get(fileId); + if (((Integer) entity1[0]).equals(1)) + childHashes[0].remove(fileId); + else + entity1[0] = (Integer) entity1[0] - 1; + + // remove the entry from the 'by name' map + DirEntity entity2 = (DirEntity) childHashes[1].remove(name); + assert (entity1 != null) : "directory not found in directory table"; + assert (entity1[1] == entity2) : "inconsistent directory entry"; + + // remove the dir children map entry if child map is empty + if (childHashes[0].size() == 0) + dirChildrenMap.remove(parentId); + + // check whether the metadata object can be deleted, as no more + // links will exist after the unlink operation + if (abstractFile.getLinkCount() == 1) + dirMap.remove(fileId); + } + + // when the last link to the file has been removed, remove all related + // metadata from other indices as well + if (abstractFile.getLinkCount() == 1) + fileAttributeMap.remove(fileId); + + abstractFile.setLinkCount(abstractFile.getLinkCount() - 1); + return abstractFile.getLinkCount(); + } + + 
public List getAttrsByFileId(long fileId, long attrType) + throws BackendException { + + List result = new LinkedList(); + + if (fileAttributeMap.get(fileId) == null) + return result; + + for (FileAttributeEntity entity : fileAttributeMap.get(fileId).values()) + if (entity.getType() == attrType) + result.add(new FileAttributeEntity(entity)); + + return result; + } + + public List getAttrsByFileId(long fileId) throws BackendException { + + if (fileAttributeMap.get(fileId) == null) + return new ArrayList(0); + + LinkedList list = new LinkedList(); + for (FileAttributeEntity entity : fileAttributeMap.get(fileId).values()) + list.add(new FileAttributeEntity(entity)); + + return list; + } + + public AbstractFileEntity getChild(String name, long parentId) throws BackendException { + + if (parentId == rootDir.getId() && name.length() == 0) + return new DirEntity(rootDir); + + Map[] dirs = dirChildrenMap.get(parentId); + if (dirs != null) { + DirEntity dir = (DirEntity) dirs[1].get(name); + if (dir != null) + return new DirEntity(dir); + } + + Map[] files = fileChildrenMap.get(parentId); + if (files != null) { + FileEntity file = (FileEntity) files[1].get(name); + if (file != null) + return new FileEntity(file); + } + + return null; + } + + public AbstractFileEntity getFileById(long fileId) throws BackendException { + + if (fileId == 1) + return new DirEntity(rootDir); + + DirEntity dir = dirMap.get(fileId); + if (dir != null) + return new DirEntity(dir); + + FileEntity file = fileMap.get(fileId); + if (file != null) + return new FileEntity(file); + + return null; + } + + public Map getFilesByParent(long parentId) throws BackendException { + + Map result = new HashMap(); + + Map[] files = fileChildrenMap.get(parentId); + if (files != null) + for (Object fileName : files[1].keySet()) + result.put((String) fileName, new FileEntity((FileEntity) files[1].get(fileName))); + + return result; + } + + public Map getDirsByParent(long parentId) throws BackendException { + + Map 
result = new HashMap(); + + Map[] dirs = dirChildrenMap.get(parentId); + if (dirs != null) + for (Object dirName : dirs[1].keySet()) + result.put((String) dirName, new DirEntity((DirEntity) dirs[1].get(dirName))); + + return result; + } + + public String getUserAttrByFileId(long fileId, String attr) { + + Map attrs = fileAttributeMap.get(fileId); + if (attrs == null) + return null; + + FileAttributeEntity ae = attrs.get(attr); + + return ae == null || ae.getType() != FileAttributeEntity.TYPE_USER ? null : ae.getValue() + .toString(); + } + + public Object getSystemAttrByFileId(long fileId, String attr) throws BackendException { + + // TODO: distinguish between system and user attributes + + Map attrs = fileAttributeMap.get(fileId); + if (attrs == null) + return null; + + FileAttributeEntity ae = attrs.get(attr); + + return ae == null || ae.getType() != FileAttributeEntity.TYPE_SYSTEM ? null : ae.getValue(); + } + + public void put(FileAttributeEntity data) throws BackendException { + + Map attrs = fileAttributeMap.get(data.getFileId()); + if (attrs == null) { + attrs = new TreeMap(); + fileAttributeMap.put(data.getFileId(), attrs); + } + + attrs.put(data.getKey(), data); + } + + public void put(FileEntity data) throws BackendException { + + // if no file ID has been assigned to the file yet, assign it now + if (data.getId() == 0) + data.setId(++nextFileId); + else if (data.getId() > nextFileId) + nextFileId = data.getId(); + + FileEntity file = fileMap.get(data.getId()); + + // if no file with the given ID exists yet, create a new one and add it + // to the file map + if (file == null) { + file = new FileEntity(data); + fileMap.put(data.getId(), file); + } + + // otherwise, replace the content of the existing metadata object with + // the given content + else + file.setContent(data); + + } + + public void put(DirEntity data) throws BackendException { + + // first, check if the entity refers to the root directory; if so + // directly set it and return + if 
(data.getId() == 1) { + rootDir.setContent(data); + return; + } + + // if no file ID has been assigned to the directory yet, assign it now + if (data.getId() == 0) + data.setId(++nextFileId); + else if (data.getId() > nextFileId) + nextFileId = data.getId(); + + DirEntity dir = dirMap.get(data.getId()); + + // if no directory with the given ID exists yet, create a new one and + // add it to the directory map + if (dir == null) { + dir = new DirEntity(data); + dirMap.put(data.getId(), dir); + } + + // otherwise, replace the content of the existing metadata object with + // the given content + else + dir.setContent(data); + + } + + public void sync() throws BackendException { + + try { + + // flush the entire database state to disk + + dbDir.mkdirs(); + File dbFile = new File(dbDir, DB_FILE_NAME); + FileOutputStream fos = new FileOutputStream(dbFile); + + ObjectOutput oos = new ObjectOutputStream(fos); + oos.writeObject(fileMap); + oos.writeObject(dirMap); + oos.writeObject(fileChildrenMap); + oos.writeObject(dirChildrenMap); + oos.writeObject(fileAttributeMap); + oos.writeLong(nextFileId); + oos.writeObject(rootDir); + + fos.getFD().sync(); + + oos.close(); + + } catch (IOException exc) { + throw new BackendException(exc); + } + } + + public void close() throws BackendException { + } + + public void destroy() { + FSUtils.delTree(dbDir); + fileMap = null; + dirMap = null; + fileChildrenMap = null; + dirChildrenMap = null; + fileAttributeMap = null; + } + + public long getDBFileSize() { + File dbFile = new File(dbDir, DB_FILE_NAME); + return dbFile.length(); + } + + public long getNumberOfFiles() { + return fileMap.size(); + } + + public long getNumberOfDirs() { + return dirMap.size(); + } + + public void dumpDB() throws BackendException { + System.out.println("files: " + fileChildrenMap); + System.out.println(); + System.out.println("dirs: " + dirChildrenMap); + } + +} diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/LogEntry.java 
b/servers/src/org/xtreemfs/mrc/brain/storage/LogEntry.java new file mode 100644 index 0000000000000000000000000000000000000000..153795a7881b89aa44bb27e2918a00437c76839f --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/brain/storage/LogEntry.java @@ -0,0 +1,426 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.brain.storage; + +import java.io.IOException; +import java.nio.BufferUnderflowException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.util.concurrent.atomic.AtomicInteger; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.mrc.MRCRequest; +import org.xtreemfs.foundation.pinky.HTTPUtils; + +/** + * Holds an operation. + * A log entry has the following format:
+ * packet length ( 4 bytes )
+ * view ID (4 bytes)
+ * sequence ID (4 bytes)
+ * SliceID (16 bytes)
+ * operationType (1 byte)
+ * OpName (1 byte) + OpnameLength bytes
+ * UserID (1 byte) + UserID bytes
+ * GroupID (1 byte) + GroupID bytes
+ * Payload (packet length - 27 bytes - OpName+UserID+GroupID bytes)
+ * packet length (copy of first integer)
+ * @author bjko
+ */
+public final class LogEntry {
+
+    /**
+     * The viewID of the view in which the operation was executed.
+     */
+    public final int viewID;
+
+    /**
+     * SequenceID of operation. Must be unique in each view.
+     */
+    public final int sequenceID;
+
+    /**
+     * Slice ID in which this operation was executed
+     */
+    public final SliceID slID;
+
+    /**
+     * Application specific field. Can be used to multiplex multiple
+     * writers into one log.
+     */
+    public final byte operationType;
+
+    /**
+     * Operation name.
+     */
+    public final String operationName;
+
+    /**
+     * The ID of the user who created the log entry
+     */
+    public final String userID;
+
+    /**
+     * The ID of the group who created the log entry
+     */
+    public final String groupID;
+
+    /**
+     * Opaque payload data; may be null if the entry carries no payload.
+     * Limited to Integer.MAX_VALUE bytes.
+     */
+    public final ReusableBuffer payload;
+
+
+    /**
+     * A listener waiting for this LogEntry to be synced/sent.
+     */
+    public SyncListener listener;
+
+    /**
+     * Context object the SyncListener may need to identify this LogEntry,
+     * e.g. the request to acknowledge once the entry has been synced.
+     */
+    public MRCRequest attachment;
+
+    /**
+     * Number of fixed-size header bytes in the serialized form: two ints
+     * (viewID + sequenceID), the SliceID, plus four single bytes (the
+     * operation type and the three one-byte length prefixes for
+     * opName/userID/groupID). Evaluates to 28.
+     * NOTE(review): the format description in the class Javadoc quotes
+     * 27 fixed bytes - verify which figure is correct against the
+     * marshalling code.
+     */
+    public static final int FIXED_HEADER_SIZE = Integer.SIZE/8*2 +
+                      SliceID.SIZE_IN_BYTES +
+                      Byte.SIZE/8 * 4;
+
+    // Reference counter, initialized to 1 in the constructors; presumably
+    // used by retain/free logic outside this view - TODO confirm.
+    protected AtomicInteger refCount;
+
+
+    /**
+     * Creates a new log entry from its individual components.
+     *
+     * @param viewID the ID of the view in which the operation was executed
+     * @param sequenceID the sequence number of the operation; must be
+     *        unique within a view
+     * @param slID the ID of the slice the operation was executed in
+     * @param operationType application-specific type tag used to multiplex
+     *        multiple writers into one log (e.g. DiskLogger.OPTYPE_MRC)
+     * @param operationName name of the operation
+     * @param userID ID of the user who created the entry
+     * @param groupID ID of the group that created the entry
+     * @param payload operation payload; may be null
+     * @param attachment context handed back to the SyncListener, e.g. the
+     *        request to acknowledge
+     */
+    public LogEntry(int viewID, int sequenceID, SliceID slID, byte operationType,
+            String operationName, String userID, String groupID,
+            ReusableBuffer payload, MRCRequest attachment) {
+        this.viewID = viewID;
+        this.sequenceID = sequenceID;
+        this.slID = slID;
+        this.operationType = operationType;
+        this.operationName = operationName;
+        this.userID = userID;
+        this.groupID = groupID;
+        this.payload = payload;
+        this.attachment = attachment;
+        // a fresh entry starts with a single reference held by the creator
+        this.refCount = new AtomicInteger(1);
+    }
+
+    /**
+     * Computes the value of the length field of the serialized entry:
+     * the fixed header size plus the lengths of the userID, groupID and
+     * operationName strings plus the payload capacity (0 if there is no
+     * payload).
+     *
+     * NOTE(review): String.length() counts chars, which equals the encoded
+     * byte count only for single-byte charsets (the unmarshalling
+     * constructor decodes these strings as ASCII) - verify that multi-byte
+     * IDs cannot occur. The stray '+' at the start of the second line of
+     * the expression is a harmless unary plus, not a typo affecting the
+     * result.
+     *
+     * @return the total serialized length in bytes to store in the
+     *         entry's length field
+     */
+    private int getLengthField() {
+        int plc = (payload == null) ? 0 : payload.capacity();
+        int flength = FIXED_HEADER_SIZE+
+                      + userID.length() + groupID.length() + operationName.length() +
+                     plc;
+        assert(plc >= 0);
+        assert(flength > 0);
+        assert(flength > FIXED_HEADER_SIZE);
+        return flength;
+    }
+
+    /**
+     * Unmarshalls a LogEntry from its binary representation.
+     *
+     * @param src buffer positioned at the leading 4-byte length field
+     * @throws IOException if the length field is negative
+     * @throws InvalidLogEntryException if the entry is truncated or corrupted
+     */
+    public LogEntry(ReusableBuffer src) throws IOException {
+        try {
+
+            int leSize = src.getInt();
+
+            if (leSize < 0)
+                throw new IOException("LogEntry length cannot be negative");
+
+            // payload length = total length minus fixed header and strings
+            int payloadl = leSize - FIXED_HEADER_SIZE;
+
+            viewID = src.getInt();
+            sequenceID = src.getInt();
+            slID = new SliceID(src);
+            operationType = src.get();
+
+            // strings are stored as a 1-byte unsigned length plus ASCII bytes
+            int strlen = src.get() & 0x00FF;
+            assert(strlen < 256);
+            assert(strlen >= 0);
+            payloadl -= strlen;
+            byte[] tmp = new byte[strlen];
+            src.get(tmp);
+            operationName = new String(tmp,HTTPUtils.ENC_ASCII);
+
+            strlen = src.get() & 0x00FF;
+            assert(strlen < 256);
+            assert(strlen >= 0);
+            if (strlen > 0) {
+                payloadl -= strlen;
+                tmp = new byte[strlen];
+                src.get(tmp);
+                userID = new String(tmp,HTTPUtils.ENC_ASCII);
+            } else
+                userID = "";
+
+            strlen = src.get() & 0x00FF;
+            assert(strlen < 256);
+            assert(strlen >= 0);
+            if (strlen > 0) {
+                payloadl -= strlen;
+                tmp = new byte[strlen];
+                src.get(tmp);
+                groupID = new String(tmp,HTTPUtils.ENC_ASCII);
+            } else
+                groupID = "";
+
+            assert(payloadl >= 0);
+
+            if (payloadl == 0) {
+                // entries without payload are legal; payload stays null
+                payload = null;
+            } else {
+                if (src.remaining() < payloadl)
+                    throw new InvalidLogEntryException("buffer does not contain enough bytes in payload. LogEntry corrupted. "+payloadl,leSize);
+                byte[] arr = new byte[payloadl];
+                src.get(arr);
+                payload = ReusableBuffer.wrap(arr);
+            }
+            listener = null;
+        } catch (BufferUnderflowException ex) {
+            Logging.logMessage(Logging.LEVEL_ERROR,this,ex);
+            throw new InvalidLogEntryException(ex.getMessage(),0);
+        }
+        this.refCount = new AtomicInteger(1);
+    }
+
+    /**
+     * Unmarshalls a LogEntry read from a FileChannel at its current
+     * position (first the 4-byte length field, then the entry itself).
+     *
+     * @param fc channel positioned at the beginning of a marshalled entry
+     * @throws IOException if reading fails or the length field is negative
+     * @throws InvalidLogEntryException if the entry is truncated or corrupted
+     */
+    public LogEntry(FileChannel fc) throws IOException {
+        ByteBuffer src2 = ByteBuffer.allocate(Integer.SIZE/8);
+        fc.read(src2);
+        src2.flip();
+        int leSize = src2.getInt();
+
+        // reject corrupted length fields before allocating a buffer;
+        // the ReusableBuffer-based constructor performs the same check,
+        // this one previously did not
+        if (leSize < 0)
+            throw new IOException("LogEntry length cannot be negative");
+
+        ReusableBuffer src = null;
+        try {
+            src = BufferPool.allocate(leSize);
+            // NOTE(review): a single read() may deliver fewer than leSize
+            // bytes; a short read would surface as BufferUnderflowException
+            // below -- confirm whether entries can exceed one read
+            fc.read(src.getBuffer());
+            src.flip();
+
+            int payloadl = leSize - FIXED_HEADER_SIZE;
+
+            viewID = src.getInt();
+            sequenceID = src.getInt();
+            slID = new SliceID(src);
+            operationType = src.get();
+
+            // strings are stored as a 1-byte unsigned length plus ASCII bytes
+            int strlen = src.get() & 0x00FF;
+            payloadl -= strlen;
+            byte[] tmp = new byte[strlen];
+            src.get(tmp);
+            operationName = new String(tmp,HTTPUtils.ENC_ASCII);
+
+            strlen = src.get() & 0x00FF;
+            if (strlen > 0) {
+                payloadl -= strlen;
+                tmp = new byte[strlen];
+                src.get(tmp);
+                userID = new String(tmp,HTTPUtils.ENC_ASCII);
+            } else
+                userID = "";
+
+            strlen = src.get() & 0x00FF;
+            if (strlen > 0) {
+                payloadl -= strlen;
+                tmp = new byte[strlen];
+                src.get(tmp);
+                groupID = new String(tmp,HTTPUtils.ENC_ASCII);
+            } else
+                groupID = "";
+
+            // guard against corrupted string lengths; was missing here but
+            // present in the buffer-based constructor
+            assert(payloadl >= 0);
+
+            if (payloadl == 0) {
+                payload = null;
+            } else {
+                if (src.remaining() < payloadl)
+                    throw new InvalidLogEntryException("buffer does not contain enough bytes in payload. LogEntry corrupted. "+payloadl,leSize);
+                byte[] arr = new byte[payloadl];
+                src.get(arr);
+                payload = ReusableBuffer.wrap(arr);
+            }
+            listener = null;
+        } catch (BufferUnderflowException ex) {
+            throw new InvalidLogEntryException(ex.getMessage(),0);
+        } finally {
+            // the temporary read buffer is always returned to the pool
+            if(src != null)
+                BufferPool.free(src);
+        }
+        this.refCount = new AtomicInteger(1);
+    }
+
+    /**
+     * Creates a copy of this entry sharing the payload data via a view
+     * buffer. The payload may legally be null (see the unmarshalling
+     * constructors); the previous version dereferenced it unconditionally
+     * and threw a NullPointerException for payload-less entries.
+     */
+    @Override
+    public LogEntry clone() {
+        ReusableBuffer payloadView =
+            (this.payload == null) ? null : this.payload.createViewBuffer();
+        return new LogEntry(this.viewID,this.sequenceID,this.slID,
+                                       this.operationType,this.operationName,
+                                       this.userID,this.groupID,payloadView,
+                                       this.attachment);
+    }
+
+    /**
+     * Marshalls this log entry into a freshly allocated buffer.
+     *
+     * @return a pooled buffer (positioned at 0) holding the 4-byte length
+     *         field followed by the binary representation of the entry;
+     *         the caller is responsible for returning it to the BufferPool
+     */
+    public ReusableBuffer marshall() {
+        int leSize = this.getLengthField();
+        //entry size + integer for length field itself
+        ReusableBuffer m = BufferPool.allocate(Integer.SIZE/8 + leSize);
+
+        m.putInt(leSize);
+        m.putInt(viewID);
+        m.putInt(sequenceID);
+        slID.write(m);
+        m.put(operationType);
+
+        // strings are stored as a 1-byte length followed by ASCII bytes
+        assert(operationName.length() < 256);
+        m.put((byte)operationName.length());
+        if (operationName.length() > 0)
+            m.put(operationName.getBytes(HTTPUtils.ENC_ASCII));
+
+        assert(userID.length() < 256);
+        m.put((byte)userID.length());
+        if (userID.length() > 0)
+            m.put(userID.getBytes(HTTPUtils.ENC_ASCII));
+
+        assert(groupID.length() < 256);
+        m.put((byte)groupID.length());
+        if (groupID.length() > 0)
+            m.put(groupID.getBytes(HTTPUtils.ENC_ASCII));
+
+        if (payload != null) {
+            // copy the payload without leaving its position modified
+            payload.position(0);
+            m.put(payload.getBuffer());
+            payload.position(0);
+        }
+        m.position(0);
+        return m;
+
+    }
+
+    /**
+     * Marshalls this log entry into the given buffer (in contrast to
+     * marshall(), which allocates a new one).
+     *
+     * @param m destination buffer; must have at least binarySize() bytes
+     *          remaining; its position is advanced past the written entry
+     */
+    public void marshall(ReusableBuffer m) {
+
+        int leSize = this.getLengthField();
+        //entry size + integer for length field itself
+
+        m.putInt(leSize);
+        m.putInt(viewID);
+        m.putInt(sequenceID);
+        slID.write(m);
+        m.put(operationType);
+
+        // strings are stored as a 1-byte length followed by ASCII bytes
+        assert(operationName.length() < 256);
+        m.put((byte)operationName.length());
+        if (operationName.length() > 0)
+            m.put(operationName.getBytes(HTTPUtils.ENC_ASCII));
+
+        assert(userID.length() < 256);
+        m.put((byte)userID.length());
+        if (userID.length() > 0)
+            m.put(userID.getBytes(HTTPUtils.ENC_ASCII));
+
+        assert(groupID.length() < 256);
+        m.put((byte)groupID.length());
+        if (groupID.length() > 0)
+            m.put(groupID.getBytes(HTTPUtils.ENC_ASCII));
+
+        if (payload != null) {
+            // copy the payload without leaving its position modified
+            payload.position(0);
+            m.put(payload.getBuffer());
+            payload.position(0);
+        }
+
+    }
+
+    /**
+     * Returns the total number of bytes a marshalled representation of
+     * this entry occupies, including the leading 4-byte length field.
+     */
+    public int binarySize() {
+        final int lengthFieldBytes = Integer.SIZE / 8;
+        return lengthFieldBytes + this.getLengthField();
+    }
+
+
+    /**
+     * Registers a listener called when the LogEntry was synced or sent.
+     * Replaces any previously registered listener.
+     * @param listener Listener to be called.
+     */
+    public void registerListener(SyncListener listener) {
+        this.listener = listener;
+    }
+
+    /**
+     * Compares two log entries. The payload is deliberately not compared;
+     * viewID + sequenceID (plus slice and operation) are assumed to
+     * identify an entry uniquely.
+     * NOTE(review): this OVERLOADS rather than overrides
+     * Object.equals(Object), so hash-based collections will not use it.
+     *
+     * @throws RuntimeException if obj is null
+     */
+    public boolean equals(LogEntry obj) {
+        if (obj == null)
+            throw new RuntimeException("other object must not be null!");
+        return ( ( obj.viewID == this.viewID ) &&
+                (obj.sequenceID == this.sequenceID) &&
+                (obj.operationType == this.operationType) &&
+                (obj.operationName.equals(this.operationName)) &&
+                (obj.slID.equals(this.slID) ));
+
+    }
+
+    /**
+     * Deliberately unsupported: no hash function consistent with
+     * equals(LogEntry) is defined, so LogEntry must not be used as a
+     * key in hash-based collections.
+     */
+    public int hashCode() {
+        throw new UnsupportedOperationException("there is no hash function defined for LogEntry");
+    }
+
+    public String toString() {
+        return "LogEntry(viewID=" + viewID + ", sequenceId=" + sequenceID
+                + ", sliceId=" + slID + ", operationType=" + operationType
+                + ", operationName=" + operationName + ", uid=" + userID
+                + ", gid=" + groupID + ", args=" + new String(payload.array()) + ")";
+    }
+
+    /**
+     * Releases one reference to this entry; when the last reference is
+     * released, the payload buffer is returned to the BufferPool.
+     */
+    public void free() {
+        if (refCount.getAndDecrement() == 1) {
+            //free everything
+            BufferPool.free(this.payload);
+        }
+    }
+
+    /**
+     * Acquires an additional reference to this entry; must be balanced
+     * by a call to free().
+     */
+    public void newReference() {
+        refCount.incrementAndGet();
+    }
+
+}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/SliceID.java b/servers/src/org/xtreemfs/mrc/brain/storage/SliceID.java
new file mode 100644
index 0000000000000000000000000000000000000000..53c26b13a882e7b25f48384144bd5671712de1dc
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/SliceID.java
@@ -0,0 +1,192 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+    This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+    Grid Operating System, see <http://www.xtreemos.eu> for more details.
+    The XtreemOS project has been developed with the financial support of the
+    European Commission's IST program under contract #FP6-033576.
+
+    XtreemFS is free software: you can redistribute it and/or modify it under
+    the terms of the GNU General Public License as published by the Free
+    Software Foundation, either version 2 of the License, or (at your option)
+    any later version.
+
+    XtreemFS is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.net.SocketException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import org.xtreemfs.common.buffer.ReusableBuffer;
+
+/**
+ * 
+ * @author bjko
+ */
+public class SliceID implements Serializable {
+
+    public static final int SIZE_IN_BYTES = 16;
+
+    private static final int VOLID_SIZE_BYTES = 12;
+
+    private static final int SLICENO_SIZE_BYTES = Integer.SIZE / 8;
+
+    private final String volumeID;
+    
+    private final byte[] volID;
+
+    private int sliceNo;
+
+    private String strRepres;
+
+    /** Creates a new instance of SliceID from a String */
+    public SliceID(String in) {
+        strRepres = in;
+        volID = new byte[VOLID_SIZE_BYTES];
+        for (int i = 0; i < VOLID_SIZE_BYTES; i++) {
+            volID[i] = (byte) Integer.parseInt(in.substring(i * 2, i * 2 + 2),
+                    16);
+        }
+        sliceNo = Integer.parseInt(in.substring(24), 16);
+        
+        volumeID = strRepres.substring(0, VOLID_SIZE_BYTES * 2);
+    }
+    
+    public SliceID(String volumeId, int sliceNo) {
+
+        volID = new byte[VOLID_SIZE_BYTES];
+        for (int i = 0; i < VOLID_SIZE_BYTES; i++) {
+            volID[i] = (byte) Integer.parseInt(volumeId.substring(i * 2, i * 2 + 2),
+                    16);
+        }
+        this.volumeID = volumeId;
+        this.sliceNo = sliceNo;
+        
+        strRepres = volumeId + VolIDGen.intToHex(sliceNo);
+    }
+
+    /**
+     * Generates a new SliceID
+     */
+    public SliceID(int sliceNo) throws SocketException {
+        volID = VolIDGen.getGenerator().getNewVolIDBytes();
+        this.sliceNo = sliceNo;
+        strRepres = "";
+        for (int i = 0; i < volID.length; i++) {
+            strRepres += VolIDGen.byteToHex(volID[i] & 0xFF);
+        }
+        volumeID = strRepres;
+        
+        strRepres += VolIDGen.intToHex(sliceNo);
+    }
+
+    /**
+     * Generates a new SliceID for an existing volume
+     */
+    public SliceID(SliceID volumeID, int sliceNo) throws SocketException {
+        volID = volumeID.volID;
+        this.sliceNo = sliceNo;
+        strRepres = "";
+        for (int i = 0; i < volID.length; i++) {
+            strRepres += VolIDGen.byteToHex(volID[i] & 0xFF);
+        }
+        this.volumeID = strRepres;
+        
+        strRepres += VolIDGen.intToHex(sliceNo);
+    }
+
+    /**
+     * reads a sliceID from a byteBuffer
+     */
+    public SliceID(ReusableBuffer buff) {
+        volID = new byte[VOLID_SIZE_BYTES];
+        buff.get(volID);
+        sliceNo = buff.getInt();
+
+        strRepres = "";
+        for (int i = 0; i < volID.length; i++) {
+            strRepres += VolIDGen.byteToHex(volID[i] & 0xFF);
+        }
+        this.volumeID = strRepres;
+        
+        strRepres += VolIDGen.intToHex(sliceNo);
+    }
+
+    /**
+     * reads a sliceID from a FileChannel
+     */
+    public SliceID(FileChannel fc) throws IOException {
+        ByteBuffer buff = ByteBuffer.allocateDirect(SIZE_IN_BYTES);
+        fc.read(buff);
+        buff.position(0);
+        volID = new byte[VOLID_SIZE_BYTES];
+        buff.get(volID);
+        sliceNo = buff.getInt();
+
+        strRepres = "";
+        for (int i = 0; i < volID.length; i++) {
+            strRepres += VolIDGen.byteToHex(volID[i] & 0xFF);
+        }
+        this.volumeID = strRepres;
+        
+        strRepres += VolIDGen.intToHex(sliceNo);
+    }
+
+    /**
+     * Returns the volume ID part of the slice ID.
+     */
+    public String getVolumeId() {
+        return volumeID;
+    }
+
+    public String toString() {
+        return strRepres;
+    }
+
+    public boolean equals(Object obj) {
+        try {
+            SliceID other = (SliceID) obj;
+            if (sliceNo != other.sliceNo)
+                return false;
+            for (int i = 0; i < volID.length; i++) {
+                if (volID[i] != other.volID[i])
+                    return false;
+            }
+            return true;
+        } catch (ClassCastException ex) {
+            return false;
+        }
+    }
+
+    public void write(ReusableBuffer buff) {
+        buff.put(volID);
+        buff.putInt(sliceNo);
+    }
+
+    public void write(FileChannel fc) throws IOException {
+        ByteBuffer buff = ByteBuffer.allocateDirect(SIZE_IN_BYTES);
+        buff.put(volID);
+        buff.putInt(sliceNo);
+        buff.position(0);
+        fc.write(buff);
+    }
+
+    public int hashCode() {
+        if (this.strRepres == null)
+            throw new RuntimeException("no string representation of sliceID");
+        return this.strRepres.hashCode();
+    }
+
+}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/StorageBackend.java b/servers/src/org/xtreemfs/mrc/brain/storage/StorageBackend.java
new file mode 100644
index 0000000000000000000000000000000000000000..6964820483e75e51d7b07030abc7d00e0936085c
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/StorageBackend.java
@@ -0,0 +1,235 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+    This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+    Grid Operating System, see <http://www.xtreemos.eu> for more details.
+    The XtreemOS project has been developed with the financial support of the
+    European Commission's IST program under contract #FP6-033576.
+
+    XtreemFS is free software: you can redistribute it and/or modify it under
+    the terms of the GNU General Public License as published by the Free
+    Software Foundation, either version 2 of the License, or (at your option)
+    any later version.
+
+    XtreemFS is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage;
+
+import java.util.List;
+import java.util.Map;
+
+import org.xtreemfs.mrc.brain.storage.entities.AbstractFileEntity;
+import org.xtreemfs.mrc.brain.storage.entities.DirEntity;
+import org.xtreemfs.mrc.brain.storage.entities.FileAttributeEntity;
+import org.xtreemfs.mrc.brain.storage.entities.FileEntity;
+
+public interface StorageBackend {
+
+    /**
+     * Returns the metadata object of the file with the given file ID.
+     *
+     * @param fileId ID of the file to look up
+     * @return the metadata object (NOTE(review): callers such as
+     *         StorageManager.getXLocationsList treat null as "not found"
+     *         -- confirm)
+     * @throws BackendException if a database error occurs
+     */
+    public AbstractFileEntity getFileById(long fileId) throws BackendException;
+
+    /**
+     * Returns all attributes of a given type associated with a file.
+     *
+     * @param fileId ID of the file
+     * @param attrType attribute type, e.g. FileAttributeEntity.TYPE_USER
+     * @return the matching attributes
+     * @throws BackendException if a database error occurs
+     */
+    public List getAttrsByFileId(long fileId, long attrType)
+        throws BackendException;
+
+    /**
+     * Returns all attributes associated with a file.
+     *
+     * @param fileId ID of the file
+     * @return all attributes of the file, regardless of type
+     * @throws BackendException if a database error occurs
+     */
+    public List getAttrsByFileId(long fileId)
+        throws BackendException;
+
+    /**
+     * Returns the value of a user-defined attribute associated with a file.
+     *
+     * @param fileId ID of the file
+     * @param attr key of the attribute
+     * @return the attribute value
+     * @throws BackendException if a database error occurs
+     */
+    public String getUserAttrByFileId(long fileId, String attr)
+        throws BackendException;
+
+    /**
+     * Returns the value of a system-internal attribute associated with a
+     * file (e.g. "ref", "spol", "ro").
+     *
+     * @param fileId ID of the file
+     * @param attr key of the attribute
+     * @return the attribute value
+     * @throws BackendException if a database error occurs
+     */
+    public Object getSystemAttrByFileId(long fileId, String attr)
+        throws BackendException;
+
+    /**
+     * Returns a mapping from names to metadata objects of files nested in
+     * the given directory.
+     *
+     * @param parentId ID of the parent directory
+     * @return mapping from file names to file metadata objects
+     * @throws BackendException if a database error occurs
+     */
+    public Map getFilesByParent(long parentId)
+        throws BackendException;
+
+    /**
+     * Returns a mapping from names to metadata objects of directories nested in
+     * the given directory.
+     *
+     * @param parentId ID of the parent directory
+     * @return mapping from directory names to directory metadata objects
+     * @throws BackendException if a database error occurs
+     */
+    public Map getDirsByParent(long parentId)
+        throws BackendException;
+
+    /**
+     * Returns the metadata object of the child with the given name in the given
+     * directory.
+     *
+     * @param name name of the child entry
+     * @param parentId ID of the parent directory
+     * @return the child's metadata object (null if no such child exists,
+     *         as relied upon by StorageManager.fileExists)
+     * @throws BackendException if a database error occurs
+     */
+    public AbstractFileEntity getChild(String name, long parentId)
+        throws BackendException;
+
+    /**
+     * Creates a new link to a metadata object with a given name from within a
+     * given directory.
+     *
+     * @param fileId ID of the file to link to
+     * @param fileName name of the new link
+     * @param parentId ID of the directory containing the link
+     * @return the new number of links to the file
+     * @throws BackendException if a database error occurs
+     */
+    public long link(long fileId, String fileName, long parentId)
+        throws BackendException;
+
+    /**
+     * Removes a link to a metadata object from a given directory. If no more
+     * links exist, the metadata object itself should also be removed.
+     *
+     * @param fileId ID of the linked file
+     * @param name name of the link to remove
+     * @param parentId ID of the directory containing the link
+     * @return the new number of links to the file
+     * @throws BackendException if a database error occurs
+     */
+    public long unlink(long fileId, String name, long parentId)
+        throws BackendException;
+
+    /**
+     * Removes an attribute associated with a given file from the internal
+     * database.
+     *
+     * @param fileId ID of the file
+     * @param key key of the attribute to remove
+     * @throws BackendException if a database error occurs
+     */
+    public void deleteAttribute(long fileId, String key)
+        throws BackendException;
+
+    /**
+     * Adds a new attribute to the internal database. If an attribute with the
+     * given ID already exists, it is replaced.
+     *
+     * @param data the attribute to store
+     * @throws BackendException if a database error occurs
+     */
+    public void put(FileAttributeEntity data) throws BackendException;
+
+    /**
+     * Adds a new file metadata object to the internal database. If a file
+     * metadata object with the given ID already exists, it is replaced.
+     *
+     * @param data the file metadata object to store
+     * @throws BackendException if a database error occurs
+     */
+    public void put(FileEntity data) throws BackendException;
+
+    /**
+     * Adds a new directory metadata object to the internal database. If a
+     * directory metadata object with the given ID already exists, it is
+     * replaced.
+     *
+     * @param data the directory metadata object to store
+     * @throws BackendException if a database error occurs
+     */
+    public void put(DirEntity data) throws BackendException;
+
+    /**
+     * Writes the in-memory representation of the internal database to disk.
+     *
+     * @throws BackendException if a database error occurs
+     */
+    public void sync() throws BackendException;
+
+    /**
+     * Shuts down the internal database.
+     *
+     * @throws BackendException if a database error occurs
+     */
+    public void close() throws BackendException;
+
+    /**
+     * Removes all data from the internal database and shuts it down.
+     */
+    public void destroy();
+
+    /**
+     * Returns the current size of the database state file.
+     *
+     * @return the current DB file size
+     */
+    public long getDBFileSize();
+
+    /**
+     * Returns the current number of files stored in the database.
+     *
+     * @return the current number of files
+     */
+    public long getNumberOfFiles();
+
+    /**
+     * Returns the current number of directories stored in the database.
+     *
+     * @return the current number of directories
+     */
+    public long getNumberOfDirs();
+
+    /**
+     * Dumps the content currently held in the internal database.
+     *
+     * @throws BackendException if a database error occurs
+     */
+    public void dumpDB() throws BackendException;
+
+}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/StorageManager.java b/servers/src/org/xtreemfs/mrc/brain/storage/StorageManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..4fd21c7cf6798cc3ecd3d57220576734d1f5231f
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/StorageManager.java
@@ -0,0 +1,779 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage;
+
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.StringTokenizer;
+
+import org.xml.sax.Attributes;
+import org.xtreemfs.common.TimeSync;
+import org.xtreemfs.common.logging.Logging;
+import org.xtreemfs.common.util.OutputUtils;
+import org.xtreemfs.mrc.brain.ErrNo;
+import org.xtreemfs.mrc.brain.UserException;
+import org.xtreemfs.mrc.brain.storage.entities.ACLEntry;
+import org.xtreemfs.mrc.brain.storage.entities.AbstractFileEntity;
+import org.xtreemfs.mrc.brain.storage.entities.DirEntity;
+import org.xtreemfs.mrc.brain.storage.entities.FileAttributeEntity;
+import org.xtreemfs.mrc.brain.storage.entities.FileEntity;
+import org.xtreemfs.mrc.brain.storage.entities.StripingPolicy;
+import org.xtreemfs.mrc.brain.storage.entities.XLocation;
+import org.xtreemfs.mrc.brain.storage.entities.XLocationsList;
+import org.xtreemfs.mrc.utils.Converter;
+
+/**
+ * This class is responsible for holding information about file system content.
+ * It is used by the default brain implementation in order to manage files and
+ * directories.
+ * 
+ * @author stender
+ */
+public class StorageManager {
+    
+    private String         dbDirectory;
+    
+    private StorageBackend backend;
+    
+    private SliceID        sliceId;
+    
+    /**
+     * Creates a storage manager for the given slice. The backend is not
+     * opened until startup() is invoked.
+     *
+     * @param dbDirectory directory containing the slice's database
+     * @param sliceId     ID of the slice managed by this storage manager
+     */
+    public StorageManager(String dbDirectory, SliceID sliceId) throws UserException {
+        this.dbDirectory = dbDirectory;
+        this.sliceId = sliceId;
+    }
+    
+    /**
+     * Opens the storage backend. Must be invoked before any other
+     * operation is performed on this storage manager.
+     */
+    public void startup() throws BackendException {
+        // backend = new DBStorageBackend(dbDirectory, volume.getOwnerId(),
+        // volume.getGroupId());
+        backend = new JavaStorageBackend(dbDirectory);
+    }
+    
+    /**
+     * Adds (or replaces) user-defined extended attributes of a file.
+     *
+     * @param fileId ID of the file to attach the attributes to
+     * @param attrs  key-value pairs to store; may be null (no-op);
+     *               values are converted via String.valueOf
+     * @throws BackendException if a database error occurs
+     */
+    public void addXAttributes(long fileId, Map attrs) throws BackendException {
+        
+        if (attrs != null) {
+            
+            for (String key : attrs.keySet()) {
+                
+                FileAttributeEntity data = new FileAttributeEntity(key, String
+                        .valueOf(attrs.get(key)), FileAttributeEntity.TYPE_USER, fileId, null);
+                
+                // add the new key-value pair to the index
+                backend.put(data);
+            }
+        }
+    }
+    
+    /**
+     * Creates a new file or directory metadata object and stores it in
+     * the backend. All timestamps are initialized to the current global
+     * time (in seconds).
+     *
+     * @param ref            optional reference target, stored as system
+     *                       attribute "ref"; may be null
+     * @param userId         owning user
+     * @param groupId        owning group
+     * @param stripingPolicy optional striping policy, stored as system
+     *                       attribute "spol"; may be null
+     * @param directory      whether to create a directory instead of a file
+     * @param aclMap         access control list, converted to ACL entries
+     * @return the ID assigned to the new file or directory
+     * @throws BackendException if a database error occurs
+     */
+    public long createFile(String ref, String userId, String groupId,
+        Map stripingPolicy, boolean directory, Map aclMap)
+        throws BackendException {
+        
+        // global time is in ms; timestamps are stored in seconds
+        long time = TimeSync.getGlobalTime() / 1000;
+        
+        // convert the access control map to an ACL entry array
+        ACLEntry[] acl = Converter.mapToACL(aclMap);
+        
+        AbstractFileEntity file = null;
+        if (directory) {
+            DirEntity d = new DirEntity(0, userId, groupId, time, time, time, acl, 0);
+            backend.put(d);
+            file = d;
+        } else {
+            FileEntity f = new FileEntity(0, userId, groupId, time, time, time, 0, null, acl, 0, 0,
+                0);
+            backend.put(f);
+            file = f;
+        }
+        
+        if (ref != null)
+            backend.put(new FileAttributeEntity("ref", ref,
+                FileAttributeEntity.TYPE_SYSTEM, file.getId(), userId));
+        
+        StripingPolicy spol = stripingPolicy == null ? null : Converter
+                .mapToStripingPolicy(stripingPolicy);
+        
+        if (spol != null)
+            backend.put(new FileAttributeEntity("spol", spol,
+                FileAttributeEntity.TYPE_SYSTEM, file.getId(), userId));
+        
+        return file.getId();
+    }
+    
+    /**
+     * Stores an existing metadata object (file or directory) together
+     * with the given attributes.
+     *
+     * @param file  the metadata object to store
+     * @param attrs attributes to attach to the file; may be null
+     * @return the ID of the stored file
+     * @throws BackendException if a database error occurs
+     */
+    public long createFile(AbstractFileEntity file, List attrs)
+        throws BackendException {
+        
+        if (file instanceof FileEntity)
+            backend.put((FileEntity) file);
+        else
+            backend.put((DirEntity) file);
+        
+        if (attrs != null)
+            for (FileAttributeEntity attr : attrs) {
+                // re-target each attribute at the stored file
+                attr.setFileId(file.getId());
+                backend.put(attr);
+            }
+        
+        return file.getId();
+    }
+    
+    /**
+     * Creates a hard link with the given name to the given file inside
+     * the given parent directory.
+     *
+     * @return the new number of links to the file
+     */
+    public long linkFile(String fileName, long fileId, long parentDirId) throws BackendException {
+        return backend.link(fileId, fileName, parentDirId);
+    }
+    
+    /**
+     * Removes the link with the given name to the given file from the
+     * given parent directory.
+     *
+     * @return the new number of links to the file
+     */
+    public long unlinkFile(String fileName, long fileId, long parentDirId) throws BackendException {
+        return backend.unlink(fileId, fileName, parentDirId);
+    }
+    
+    /**
+     * Deletes user-defined extended attributes of a file.
+     *
+     * @param fileId   ID of the file
+     * @param attrKeys keys of the attributes to delete; if null, ALL
+     *                 user-defined attributes of the file are deleted
+     * @throws BackendException if a database error occurs
+     */
+    public void deleteXAttributes(long fileId, List attrKeys) throws BackendException {
+        
+        // fetch the user attributes once up front; the previous version
+        // re-queried the backend for the full list once per key
+        Collection<FileAttributeEntity> list = backend.getAttrsByFileId(fileId,
+            FileAttributeEntity.TYPE_USER);
+        
+        // if attrKeys == null, delete all attributes
+        if (attrKeys == null) {
+            
+            for (FileAttributeEntity att : list)
+                backend.deleteAttribute(att.getFileId(), att.getKey());
+        }
+
+        else {
+            
+            for (Object key : attrKeys) {
+                for (FileAttributeEntity att : list) {
+                    if (att.getKey().equals(key))
+                        backend.deleteAttribute(att.getFileId(), att.getKey());
+                }
+            }
+        }
+    }
+    
+    /**
+     * Returns all user-defined extended attributes of a file as a
+     * key-value mapping.
+     *
+     * @param fileId ID of the file
+     * @return mapping from attribute keys to attribute values
+     * @throws BackendException if a database error occurs
+     */
+    public Map getXAttributes(long fileId) throws BackendException {
+        
+        Map result = new HashMap();
+        
+        for (Object o : backend.getAttrsByFileId(fileId,
+            FileAttributeEntity.TYPE_USER)) {
+            FileAttributeEntity att = (FileAttributeEntity) o;
+            result.put(att.getKey(), att.getValue());
+        }
+        
+        return result;
+    }
+    
+    /**
+     * Returns all attributes (user-defined and system) associated with
+     * the given file.
+     */
+    public List getAllAttributes(long fileId) throws BackendException {
+        return backend.getAttrsByFileId(fileId);
+    }
+    
+    /**
+     * Resolves a path, starting at the root entity (file ID 1), to its
+     * metadata object.
+     *
+     * @throws UserException with ENOENT if the path cannot be resolved
+     */
+    public AbstractFileEntity getFileEntity(String path) throws UserException, BackendException {
+        
+        AbstractFileEntity file = getFile(backend.getFileById(1), path);
+        if (file == null)
+            throw new UserException(ErrNo.ENOENT, "could not find file or directory '" + path + "'");
+        
+        return file;
+    }
+    
+    /**
+     * Resolves a path and verifies the type of the resulting entity. The
+     * root entity (file ID 1) is exempt from the type check.
+     *
+     * @param directory true if a directory is expected, false for a file
+     * @throws UserException ENOENT if the path cannot be resolved;
+     *                       ENOTDIR/EISDIR if the entity has the wrong type
+     */
+    public AbstractFileEntity getFileEntity(String path, boolean directory)
+        throws BackendException, UserException {
+        
+        AbstractFileEntity file = getFileEntity(path);
+        
+        if (file.getId() == 1 || file.isDirectory() == directory)
+            return file;
+        
+        throw new UserException(directory ? ErrNo.ENOTDIR : ErrNo.EISDIR, "'" + path
+            + "' is not a " + (directory ? "directory" : "file"));
+    }
+    
+    /**
+     * Returns the metadata object of the file or directory with the
+     * given ID.
+     */
+    public AbstractFileEntity getFileEntity(long fileId) throws BackendException {
+        return backend.getFileById(fileId);
+    }
+    
+    /**
+     * Returns the X-Locations list of the file with the given ID.
+     *
+     * @throws UserException with ENOENT if no such file exists or the ID
+     *                       refers to a directory
+     */
+    public XLocationsList getXLocationsList(long fileId) throws BackendException, UserException {
+        
+        AbstractFileEntity file = getFileEntity(fileId);
+        // 'instanceof' is false for null, so no separate null check is needed
+        if (!(file instanceof FileEntity))
+            throw new UserException(ErrNo.ENOENT, "file does not exist or is a directory");
+        
+        return ((FileEntity) file).getXLocationsList();
+    }
+    
+    /**
+     * Returns the striping policy assigned to the given file (system
+     * attribute "spol"), or null if none is assigned. A defensive copy
+     * is returned, so callers may modify the result freely.
+     */
+    public StripingPolicy getStripingPolicy(long fileId) throws BackendException {
+        StripingPolicy sp = (StripingPolicy) backend.getSystemAttrByFileId(fileId, "spol");
+        return sp == null ? null : new StripingPolicy(sp.getPolicy(), sp.getStripeSize(), sp
+                .getWidth());
+    }
+    
+    /**
+     * Returns the volume's default striping policy, i.e. the policy
+     * attached to the root directory (file ID 1), or null if none is set.
+     * A defensive copy is returned.
+     */
+    public StripingPolicy getVolumeStripingPolicy() throws BackendException {
+        // the volume policy is simply the root directory's policy;
+        // delegate instead of duplicating the lookup-and-copy logic
+        return getStripingPolicy(1);
+    }
+    
+    public boolean isReadOnly(long fileId) throws BackendException {
+        Boolean ro = (Boolean) backend.getSystemAttrByFileId(fileId, "ro");
+        return ro == null ? false : ro;
+    }
+    
    /**
     * Returns the value of the "ref" system attribute of the given file —
     * presumably the target of a symbolic link (TODO confirm against callers).
     *
     * @param fileId the file id
     * @return the reference string, or null if no "ref" attribute is set
     * @throws BackendException if the backend lookup fails
     */
    public String getFileReference(long fileId) throws BackendException {
        return (String) backend.getSystemAttrByFileId(fileId, "ref");
    }
+    
+    public boolean hasChildren(long fileId) throws BackendException {
+        return !(backend.getFilesByParent(fileId).isEmpty() && backend.getDirsByParent(fileId)
+                .isEmpty());
+    }
+    
    /**
     * Returns the child entity with the given name inside the given parent
     * directory, or null if no such child exists.
     *
     * @param fileName the child's name
     * @param parentDirId the parent directory's file id
     * @return the child entity, or null
     * @throws BackendException if the backend lookup fails
     */
    public AbstractFileEntity getChild(String fileName, long parentDirId) throws BackendException {
        return backend.getChild(fileName, parentDirId);
    }
+    
+    public List getChildren(long fileId) throws BackendException {
+        
+        List list = new LinkedList();
+        
+        Map files = backend.getFilesByParent(fileId);
+        list.addAll(files.keySet());
+        
+        Map dirs = backend.getDirsByParent(fileId);
+        list.addAll(dirs.keySet());
+        
+        return list;
+    }
+    
+    public Map getChildData(long fileId) throws BackendException {
+        
+        Map map = new HashMap();
+        
+        Map files = backend.getFilesByParent(fileId);
+        map.putAll(files);
+        
+        Map dirs = backend.getDirsByParent(fileId);
+        map.putAll(dirs);
+        
+        return map;
+    }
+    
+    public ACLEntry[] getVolumeACL() throws BackendException {
+        AbstractFileEntity file = backend.getFileById(1);
+        return file.getAcl();
+    }
+    
    /**
     * Checks whether a child with the given name exists in the given parent
     * directory.
     *
     * @param parentDir the parent directory's file id
     * @param file the child's name
     * @return true if the child exists, false otherwise
     * @throws BackendException if the backend lookup fails
     */
    public boolean fileExists(long parentDir, String file) throws BackendException {
        return backend.getChild(file, parentDir) != null;
    }
+    
    /**
     * Assigns an ACL, given as a map, to the file or directory with the given
     * id. The map is converted to an ACLEntry array first.
     *
     * @param fileId the file id
     * @param acl the ACL as a map of entities to rights
     * @throws BackendException if the backend update fails
     */
    public void setFileACL(long fileId, Map acl) throws BackendException {
        setFileACL(fileId, Converter.mapToACL(acl));
    }
+    
+    public void setFileACL(long fileId, ACLEntry[] acl) throws BackendException {
+        
+        AbstractFileEntity file = backend.getFileById(fileId);
+        file.setAcl(acl);
+        
+        if (file instanceof FileEntity)
+            backend.put((FileEntity) file);
+        else
+            backend.put((DirEntity) file);
+    }
+    
+    public void setFileSize(long fileId, long fileSize, long epoch, long issuedEpoch)
+        throws BackendException {
+        
+        FileEntity file = (FileEntity) backend.getFileById(fileId);
+        file.setSize(fileSize);
+        file.setEpoch(epoch);
+        file.setIssuedEpoch(issuedEpoch);
+        backend.put(file);
+    }
+    
+    public void setFileOwner(long fileId, String owner) throws BackendException {
+        
+        AbstractFileEntity file = backend.getFileById(fileId);
+        file.setUserId(owner);
+        
+        if (file instanceof FileEntity)
+            backend.put((FileEntity) file);
+        else
+            backend.put((DirEntity) file);
+    }
+    
+    public void setFileGroup(long fileId, String group) throws BackendException {
+        
+        AbstractFileEntity file = backend.getFileById(fileId);
+        file.setGroupId(group);
+        
+        if (file instanceof FileEntity)
+            backend.put((FileEntity) file);
+        else
+            backend.put((DirEntity) file);
+    }
+    
+    public void setXLocationsList(long fileId, XLocationsList xLocList) throws BackendException {
+        
+        FileEntity file = (FileEntity) backend.getFileById(fileId);
+        file.setXLocationsList(xLocList);
+        backend.put(file);
+    }
+    
    /**
     * Assigns the given ACL to the volume by attaching it to the root
     * directory (file id 1).
     *
     * @param acl the ACL as a map of entities to rights
     * @throws BackendException if the backend update fails
     */
    public void setVolumeACL(Map acl) throws BackendException {
        setFileACL(1, acl);
    }
+    
    /**
     * Assigns the volume's default striping policy by attaching it to the
     * root directory (file id 1).
     *
     * @param stripingPolicy the striping policy as a map, or null to remove it
     * @throws BackendException if the backend update fails
     */
    public void setVolumeStripingPolicy(Map stripingPolicy) throws BackendException {
        setStripingPolicy(1, stripingPolicy);
    }
+    
+    public void setStripingPolicy(long fileId, Map stripingPolicy)
+        throws BackendException {
+        
+        if (stripingPolicy != null)
+            backend.put(new FileAttributeEntity("spol", Converter
+                    .mapToStripingPolicy(stripingPolicy), FileAttributeEntity.TYPE_SYSTEM, fileId,
+                ""));
+        else
+            backend.deleteAttribute(fileId, "spol");
+    }
+    
+    public void setReadOnly(long fileId, boolean readOnly) throws BackendException {
+        backend.put(new FileAttributeEntity("ro", readOnly,
+            FileAttributeEntity.TYPE_SYSTEM, fileId, ""));
+    }
+    
    /**
     * Submits a query against the given context path.
     *
     * NOTE(review): not implemented yet — always returns null. Callers must
     * be prepared for a null result until the TODO below is resolved.
     *
     * @param contextPath the path defining the query context
     * @param queryString the query to evaluate
     * @return currently always null
     * @throws BackendException declared for future implementations
     * @throws UserException declared for future implementations
     */
    public List submitQuery(String contextPath, String queryString)
        throws BackendException, UserException {
        
        // TODO
        
        return null;
    }
+    
    /**
     * Flushes all pending changes in the backend to stable storage.
     *
     * @throws BackendException if the sync fails
     */
    public void sync() throws BackendException {
        backend.sync();
    }
+    
    /**
     * Closes the backend and releases the reference to it. The manager must
     * not be used afterwards.
     *
     * @throws BackendException if closing the backend fails
     */
    public void shutdown() throws BackendException {
        backend.close();
        backend = null;
    }
+    
+    public void cleanup() {
+        if (backend != null)
+            backend.destroy();
+        backend = null;
+    }
+    
+    private AbstractFileEntity getFile(AbstractFileEntity parent, String path)
+        throws BackendException, UserException {
+        
+        if (path.equals(""))
+            return parent;
+        
+        int i = path.indexOf('/');
+        String first = i == -1 ? path : path.substring(0, i);
+        String remainder = i == -1 ? "" : path.substring(i + 1);
+        
+        // check if there is a subdirectory with the name of the topmost path
+        // component
+        AbstractFileEntity child = backend.getChild(first, parent.getId());
+        
+        if (child == null)
+            throw new UserException(ErrNo.ENOENT, "path component '" + first + "' does not exist");
+        
+        if (!child.isDirectory() && remainder.length() > 0)
+            throw new UserException(ErrNo.ENOTDIR, "inner path component '" + first
+                + "' is not a directory");
+        
+        return getFile(child, remainder);
+    }
+    
    /**
     * Updates the selected POSIX timestamps of the file or directory with the
     * given id to the current (global) time and persists the change.
     *
     * @param fileId the file id
     * @param setATime whether to update the access time
     * @param setCTime whether to update the change time
     * @param setMTime whether to update the modification time
     * @throws BackendException if the backend update fails
     */
    public void updateFileTimes(long fileId, boolean setATime, boolean setCTime, boolean setMTime)
        throws BackendException {
        
        AbstractFileEntity file = backend.getFileById(fileId);
        updateFileTimes(file, setATime, setCTime, setMTime);
    }
+    
+    private void updateFileTimes(AbstractFileEntity file, boolean setATime, boolean setCTime,
+        boolean setMTime) throws BackendException {
+        
+        long currentTime = TimeSync.getGlobalTime() / 1000;
+        
+        if (setATime)
+            file.setAtime(currentTime);
+        if (setCTime)
+            file.setCtime(currentTime);
+        if (setMTime)
+            file.setMtime(currentTime);
+        
+        if (file instanceof FileEntity)
+            backend.put((FileEntity) file);
+        else
+            backend.put((DirEntity) file);
+    }
+    
    /**
     * Returns the id of the slice managed by this storage manager.
     */
    public SliceID getSliceId() {
        return sliceId;
    }
+    
    /**
     * Returns the current size of the backend database file in bytes.
     */
    public long getDBFileSize() {
        return backend.getDBFileSize();
    }
+    
    /**
     * Returns the number of files stored in the backend.
     */
    public long getNumberOfFiles() {
        return backend.getNumberOfFiles();
    }
+    
    /**
     * Returns the number of directories stored in the backend.
     */
    public long getNumberOfDirs() {
        return backend.getNumberOfDirs();
    }
+    
    /**
     * Serializes the entire database to the given writer, starting with the
     * root directory (file id 1) and recursing through the tree.
     *
     * NOTE(review): the string literals written here appear to have lost
     * their XML tag content during extraction (only "\n" remains) — confirm
     * the original tag strings against version control before relying on the
     * dump format.
     *
     * @param xmlWriter the writer to serialize the database to
     * @throws BackendException if a backend lookup fails
     * @throws IOException if writing fails
     */
    public void dumpDB(BufferedWriter xmlWriter) throws BackendException, IOException {
        
        DirEntity dir = (DirEntity) backend.getFileById(1);
        
        // serialize the root directory
        xmlWriter.write("\n");
        
        // serialize the root directory's ACL
        ACLEntry[] acl = dir.getAcl();
        dumpACL(xmlWriter, acl);
        
        // serialize the root directory's attributes
        List attrs = backend.getAttrsByFileId(dir.getId());
        dumpAttrs(xmlWriter, attrs);
        
        // recurse into the tree below the root, then close the dump
        dumpDB(xmlWriter, 1);
        xmlWriter.write("\n");
    }
+    
+    private void dumpDB(BufferedWriter xmlWriter, long parentId) throws BackendException,
+        IOException {
+        
+        // serialize all directories
+        Map dirs = backend.getDirsByParent(parentId);
+        for (String name : dirs.keySet()) {
+            
+            DirEntity dir = dirs.get(name);
+            
+            // serialize the directory
+            xmlWriter.write("\n");
+            
+            // serialize the directory's ACL
+            ACLEntry[] acl = dir.getAcl();
+            dumpACL(xmlWriter, acl);
+            
+            // serialize the directory's attributes
+            List attrs = backend.getAttrsByFileId(dir.getId());
+            dumpAttrs(xmlWriter, attrs);
+            
+            dumpDB(xmlWriter, dir.getId());
+            xmlWriter.write("\n");
+        }
+        
+        // serialize all files
+        Map files = backend.getFilesByParent(parentId);
+        for (String name : files.keySet()) {
+            
+            FileEntity file = files.get(name);
+            
+            // serialize the file
+            xmlWriter.write("\n");
+            
+            // serialize the file's xLoc list
+            XLocationsList xloc = file.getXLocationsList();
+            if (xloc != null) {
+                xmlWriter.write("\n");
+                for (XLocation replica : xloc.getReplicas()) {
+                    xmlWriter.write("\n");
+                    for (String osd : replica.getOsdList())
+                        xmlWriter.write("\n");
+                }
+                xmlWriter.write("\n");
+                xmlWriter.write("\n");
+            }
+            
+            // serialize the file's ACL
+            ACLEntry[] acl = file.getAcl();
+            dumpACL(xmlWriter, acl);
+            
+            // serialize the file's attributes
+            List attrs = backend.getAttrsByFileId(file.getId());
+            dumpAttrs(xmlWriter, attrs);
+            
+            xmlWriter.write("\n");
+        }
+    }
+    
+    private void dumpAttrs(BufferedWriter xmlWriter, List attrs)
+        throws IOException {
+        
+        if (attrs != null && attrs.size() != 0) {
+            xmlWriter.write("\n");
+            for (FileAttributeEntity attr : attrs) {
+                
+                String key = attr.getKey().toString();
+                String val = attr.getValue().toString();
+                
+                // create byte array for user attributes, as they might
+                // contain binary content
+                if (attr.getType() != FileAttributeEntity.TYPE_SYSTEM) {
+                    key = OutputUtils.byteArrayToHexString(key.getBytes());
+                    val = OutputUtils.byteArrayToHexString(val.getBytes());
+                }
+                
+                xmlWriter.write("\n");
+            }
+            xmlWriter.write("\n");
+        }
+    }
+    
    /**
     * Serializes an ACL to the given writer; does nothing for a null or
     * empty ACL.
     *
     * NOTE(review): the "\n" literals appear to have lost their XML tag
     * content during extraction — confirm against version control.
     *
     * @param xmlWriter the writer to serialize the ACL to
     * @param acl the ACL entries to serialize (may be null or empty)
     * @throws IOException if writing fails
     */
    private void dumpACL(BufferedWriter xmlWriter, ACLEntry[] acl) throws IOException {
        
        if (acl != null && acl.length != 0) {
            xmlWriter.write("\n");
            for (ACLEntry entry : acl)
                xmlWriter.write("\n");
            xmlWriter.write("\n");
        }
    }
+    
    /**
     * SAX-style callback that rebuilds the database from an XML dump created
     * by {@link #dumpDB(BufferedWriter)}. Invoked once per start tag
     * (openTag == true) and once per end tag (openTag == false); the shared
     * RestoreState carries the directory stack and the entity/replica
     * currently being restored across calls.
     *
     * @param entity the XML element name ("dir", "file", "xlocList", "xloc",
     *            "osd", "entry" or "attr")
     * @param attrs the element's XML attributes
     * @param state mutable parser state shared across invocations
     * @param openTag true for a start tag, false for an end tag
     * @param dbVersion the dump format version (affects attribute decoding)
     * @throws BackendException if a backend operation fails
     */
    public void restoreDBFromDump(String entity, Attributes attrs, RestoreState state,
        boolean openTag, int dbVersion) throws BackendException {
        
        if (entity.equals("dir")) {
            
            if (openTag) {
                
                Long id = Long.parseLong(attrs.getValue(attrs.getIndex("id")));
                String name = OutputUtils.unescapeFromXML(attrs.getValue(attrs.getIndex("name")));
                String uid = attrs.getValue(attrs.getIndex("uid"));
                String gid = attrs.getValue(attrs.getIndex("gid"));
                long atime = Long.parseLong(attrs.getValue(attrs.getIndex("atime")));
                long ctime = Long.parseLong(attrs.getValue(attrs.getIndex("ctime")));
                long mtime = Long.parseLong(attrs.getValue(attrs.getIndex("mtime")));
                
                DirEntity dir = new DirEntity(id, uid, gid, atime, ctime, mtime, null, 0);
                
                // the root directory has no parent to be linked to
                createFile(dir, null);
                if (state.parentIds.size() != 0)
                    linkFile(name, id, state.parentIds.get(0));
                
                // push this directory onto the parent stack for its children
                state.parentIds.add(0, id);
                state.currentEntity = backend.getFileById(id);
            }

            else
                // closing tag: leave the directory again
                state.parentIds.remove(0);
        }

        else if (entity.equals("file") && openTag) {
            
            Long id = Long.parseLong(attrs.getValue(attrs.getIndex("id")));
            String name = OutputUtils.unescapeFromXML(attrs.getValue(attrs.getIndex("name")));
            
            // since files may be linked to multiple directories, create the
            // metadata object only if it does not exist yet
            FileEntity file = (FileEntity) backend.getFileById(id);
            if (file == null) {
                
                String uid = attrs.getValue(attrs.getIndex("uid"));
                String gid = attrs.getValue(attrs.getIndex("gid"));
                long atime = Long.parseLong(attrs.getValue(attrs.getIndex("atime")));
                long ctime = Long.parseLong(attrs.getValue(attrs.getIndex("ctime")));
                long mtime = Long.parseLong(attrs.getValue(attrs.getIndex("mtime")));
                long size = Long.parseLong(attrs.getValue(attrs.getIndex("size")));
                // epochs are optional in older dumps and default to 0
                String writeEpochStr = attrs.getValue(attrs.getIndex("epoch"));
                long writeEpoch = writeEpochStr == null ? 0 : Long.parseLong(writeEpochStr);
                String truncEpochStr = attrs.getValue(attrs.getIndex("issuedEpoch"));
                long truncEpoch = truncEpochStr == null ? 0 : Long.parseLong(truncEpochStr);
                
                file = new FileEntity(id, uid, gid, atime, ctime, mtime, size, null, null, 0,
                    writeEpoch, truncEpoch);
                
                createFile(file, null);
            }
            
            // link the (possibly pre-existing) file into the current directory
            linkFile(name, id, state.parentIds.get(0));
            
            state.currentEntity = backend.getFileById(id);
        }

        else if (entity.equals("xlocList") && openTag) {
            
            // start an empty X-Locations list; replicas are added by "xloc"
            long version = Long.parseLong(attrs.getValue(attrs.getIndex("version")));
            ((FileEntity) state.currentEntity).setXLocationsList(new XLocationsList(null, version));
            backend.put((FileEntity) state.currentEntity);
        }

        else if (entity.equals("xloc") && openTag) {
            
            // the pattern attribute encodes "policy, stripeSize, width"
            String pattern = attrs.getValue(attrs.getIndex("pattern"));
            StringTokenizer st = new StringTokenizer(pattern, " ,");
            String policy = st.nextToken();
            long size = Long.parseLong(st.nextToken());
            long width = Long.parseLong(st.nextToken());
            
            XLocationsList xLocList = ((FileEntity) state.currentEntity).getXLocationsList();
            
            // OSDs are attached later by the "osd" elements
            state.currentXLoc = new XLocation(new StripingPolicy(policy, size, width), null);
            xLocList.addReplicaWithoutVersionChange(state.currentXLoc);
        }

        else if (entity.equals("osd") && openTag) {
            
            String osd = attrs.getValue(attrs.getIndex("location"));
            
            // append the OSD to the current replica's OSD array
            String[] osdList = state.currentXLoc.getOsdList();
            if (osdList == null)
                osdList = new String[] { osd };
            else {
                String[] newOSDList = new String[osdList.length + 1];
                System.arraycopy(osdList, 0, newOSDList, 0, osdList.length);
                newOSDList[newOSDList.length - 1] = osd;
                osdList = newOSDList;
            }
            
            state.currentXLoc.setOsdList(osdList);
        }

        else if (entity.equals("entry") && openTag) {
            
            String userId = attrs.getValue(attrs.getIndex("entity"));
            long rights = Long.parseLong(attrs.getValue(attrs.getIndex("rights")));
            
            // append the entry to the current entity's ACL array
            ACLEntry[] acl = state.currentEntity.getAcl();
            if (acl == null)
                acl = new ACLEntry[] { new ACLEntry(userId, rights) };
            else {
                ACLEntry[] newACL = new ACLEntry[acl.length + 1];
                System.arraycopy(acl, 0, newACL, 0, acl.length);
                newACL[newACL.length - 1] = new ACLEntry(userId, rights);
                acl = newACL;
            }
            
            state.currentEntity.setAcl(acl);
            // the backend offers no polymorphic 'put', so dispatch on the type
            if (state.currentEntity instanceof FileEntity)
                backend.put((FileEntity) state.currentEntity);
            else
                backend.put((DirEntity) state.currentEntity);
        }

        else if (entity.equals("attr") && openTag) {
            
            long type = Long.parseLong(attrs.getValue(attrs.getIndex("type")));
            String uid = attrs.getValue(attrs.getIndex("uid"));
            
            String key = null;
            Object value = null;
            
            // do not escape system attributes, as they only contain ASCII data
            key = attrs.getValue(attrs.getIndex("key"));
            if (type == FileAttributeEntity.TYPE_SYSTEM) {
                value = attrs.getValue(attrs.getIndex("value"));
            }

            else if (dbVersion < 2) {
                // old dumps stored user attributes XML-escaped; skip
                // attributes that cannot be decoded instead of aborting
                try {
                    key = OutputUtils.unescapeFromXML(attrs.getValue(attrs.getIndex("key")));
                    value = OutputUtils.unescapeFromXML(attrs.getValue(attrs.getIndex("value")));
                } catch (Exception exc) {
                    Logging.logMessage(Logging.LEVEL_WARN, this, "extended attribute");
                    return;
                }
                
            }

            else {
                // newer dumps store user attributes hex-encoded (binary-safe)
                key = new String(OutputUtils.hexStringToByteArray(attrs.getValue(attrs
                        .getIndex("key"))));
                value = new String(OutputUtils.hexStringToByteArray(attrs.getValue(attrs
                        .getIndex("value"))));
            }
            
            // if the value refers to a striping policy, parse it
            if (key.equals("spol")) {
                StringTokenizer st = new StringTokenizer(value.toString(), ", ");
                value = new StripingPolicy(st.nextToken(), Long.parseLong(st.nextToken()), Long
                        .parseLong(st.nextToken()));
            } else if (key.equals("ro"))
                value = Boolean.valueOf((String) value);
            
            backend
                    .put(new FileAttributeEntity(key, value, type, state.currentEntity.getId(), uid));
        }
    }
+    
    /**
     * VERY EVIL OPERATION!
     *
     * Exposes the raw storage backend, bypassing all consistency checks of
     * this manager. Only intended for maintenance/debugging code.
     *
     * @return the underlying storage backend
     */
    public StorageBackend getBackend() {
        return this.backend;
    }
+    
+    public static class RestoreState {
+        
+        public List         parentIds = new LinkedList();
+        
+        public AbstractFileEntity currentEntity;
+        
+        public XLocation          currentXLoc;
+        
+    }
+    
+    /**
+     * 
+     * @param fileID
+     * @return true, if the file with the given ID exists, false otherwise
+     * @throws BackendException
+     */
+    public boolean exists(String fileID) throws BackendException {
+        try {
+            return (getFileEntity(Long.parseLong(fileID)) != null);
+        } catch (NumberFormatException e) {
+            throw new BackendException("StorageManager.exists(fileID) : wrong fileID-format");
+        }
+    }
+}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/SyncListener.java b/servers/src/org/xtreemfs/mrc/brain/storage/SyncListener.java
new file mode 100644
index 0000000000000000000000000000000000000000..9d7c7ec9ab26c61d81a780687dc2d59be3568438
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/SyncListener.java
@@ -0,0 +1,41 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+    This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+    Grid Operating System, see  for more details.
+    The XtreemOS project has been developed with the financial support of the
+    European Commission's IST program under contract #FP6-033576.
+
+    XtreemFS is free software: you can redistribute it and/or modify it under
+    the terms of the GNU General Public License as published by the Free
+    Software Foundation, either version 2 of the License, or (at your option)
+    any later version.
+
+    XtreemFS is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
    along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage;
+
/**
 * Simple listener called after a LogEntry was synced or sent.
 *
 * Exactly one of the two callbacks is invoked per LogEntry.
 *
 * @author bjko
 */
public interface SyncListener {
    
    /** Called after the LogEntry was synced to logfile.
     *
     * @param entry the entry that was written successfully
     */
    public void synced(LogEntry entry);
    
    /** Called if the LogEntry could not be written/sent.
     *
     * @param entry the entry that failed
     * @param ex the cause of the failure
     */
    public void failed(LogEntry entry, Exception ex);
    
}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/VolIDGen.java b/servers/src/org/xtreemfs/mrc/brain/storage/VolIDGen.java
new file mode 100644
index 0000000000000000000000000000000000000000..af7aaac32446cb327b4886f3daea06ed91115ff5
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/VolIDGen.java
@@ -0,0 +1,140 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+    This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+    Grid Operating System, see  for more details.
+    The XtreemOS project has been developed with the financial support of the
+    European Commission's IST program under contract #FP6-033576.
+
+    XtreemFS is free software: you can redistribute it and/or modify it under
+    the terms of the GNU General Public License as published by the Free
+    Software Foundation, either version 2 of the License, or (at your option)
+    any later version.
+
+    XtreemFS is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage;
+
+import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.util.Enumeration;
+
+import org.xtreemfs.common.logging.Logging;
+
+/**
+ * Generates globally unique IDs for volumes based on the MAC, time and a
+ * random component.
+ * @author bjko
+ */
+public class VolIDGen {
+
+    private String ADDR_PART;
+
+    private byte[] addr;
+
+    private static VolIDGen instance = null;
+
+
+    /** Creates a new instance of VolIDGen
+     * @throws SocketException if something is wrong with the sockets and MACs cannto be read
+     */
+    public VolIDGen() throws SocketException {
+        byte[] mac = null;
+        byte[] virtualMac = null;
+        Enumeration nifs = NetworkInterface.getNetworkInterfaces();
+        while (nifs.hasMoreElements()) {
+            NetworkInterface nif = nifs.nextElement();
+            byte[] tmp = nif.getHardwareAddress();
+            if (tmp != null) {
+                //okay, IF has a MAC address. Not all have one, think of lo
+
+                //we do not want a VM MAC, but take it if there is nothing else
+                if ( (tmp[0] == 0) && (tmp[1] == 0x50) && (tmp[2] == 0x56)) {
+                    //VMWARE's MACs
+                    virtualMac = tmp;
+                } else if ( (tmp[0] == 0) && (tmp[1] == 0x16) && ((0xFF & tmp[2]) >= 0xE0)) {
+                    //XEN's MACs
+                    virtualMac = tmp;
+                } else {
+                    //take a real MAC
+                    mac = tmp;
+                }
+            }
+        }
+        if (mac == null) {
+            if (virtualMac == null) {
+                //THIS IS BAD!
+                //GOTO shoot the MRC in the foot ;-)
+
+                Logging.logMessage(Logging.LEVEL_WARN, this,
+                        "cannot find a Hardware/MAC address to initialize, using random number instead");
+
+                mac = new byte[6];
+                for (int i = 0; i < mac.length; i++)
+                    mac[i] = (byte) (Math.random() * 256 - 128);
+
+            } else {
+                Logging.logMessage(Logging.LEVEL_WARN, this,
+                        "using a mac that is part of a virual machine - this may cause volumeID collisions!");
+
+                mac = virtualMac;
+            }
+        }
+
+        //make it a six byte HEXTRING w/ leading zzzzzeros
+        ADDR_PART = byteToHex(mac[0] & 0x00FF)+byteToHex(mac[1] & 0x00FF)+byteToHex(mac[2] & 0x00FF)+
+                    byteToHex(mac[3] & 0x00FF)+byteToHex(mac[4] & 0x00FF)+byteToHex(mac[5] & 0x00FF);
+        addr = mac;
+
+    }
+
+    /** Creates a new unique volume ID
+     *  @return the new volumeID
+     *  @deprecated use SliceID instead
+     */
+    public String getNewVolID() {
+        int time = (int)(System.currentTimeMillis()/1000l);
+
+        return ADDR_PART+intToHex(time)+shortToHex((int)(Math.random()*0xFFFF));
+
+    }
+
+    public byte[] getNewVolIDBytes() {
+        int time = (int)(System.currentTimeMillis()/1000l);
+        int rand = (int)(Math.random()*0xFFFF);
+        byte[] arr = new byte[]{addr[0],addr[1],addr[2],addr[3],addr[4],addr[5],
+                    (byte)(time & 0xFF), (byte)(time >> 8 & 0xFF), (byte)(time >> 16 & 0xFF),
+                    (byte)(time >> 24 & 0xFF), (byte)(rand & 0xFF),  (byte)(rand >> 8 & 0xFF)};
+        return arr;
+    }
+
+    /** Gets the single instance of VolIDGen
+     */
+    public static synchronized VolIDGen getGenerator() throws SocketException {
+        if (instance == null) {
+            instance = new VolIDGen();
+        }
+        return instance;
+    }
+
+    //helper too simple to document
+    public static String byteToHex(int in) {
+        return String.format("%02X",in);
+    }
+    public static String intToHex(int in) {
+        return String.format("%08X",in);
+    }
+    public static String shortToHex(int in) {
+        return String.format("%04X",in);
+    }
+
+}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/entities/ACLEntry.java b/servers/src/org/xtreemfs/mrc/brain/storage/entities/ACLEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..c283f773df797116e17a94cc7ddb772f9e3fe2de
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/entities/ACLEntry.java
@@ -0,0 +1,74 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+    This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+    Grid Operating System, see  for more details.
+    The XtreemOS project has been developed with the financial support of the
+    European Commission's IST program under contract #FP6-033576.
+
+    XtreemFS is free software: you can redistribute it and/or modify it under
+    the terms of the GNU General Public License as published by the Free
+    Software Foundation, either version 2 of the License, or (at your option)
+    any later version.
+
+    XtreemFS is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
    along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage.entities;
+
+import java.io.Serializable;
+
/**
 * A single entry of an ACL assigned to a file, directory or volume. An entry
 * is a key-value pair mapping an entity (e.g. a user or group identifier) to
 * a set of access rights encoded as a bit mask.
 *
 * How an ACL is used in detail depends on the access control policy held by
 * the volume.
 *
 * @author stender
 *
 */
public class ACLEntry implements Serializable {

    // the entity (user/group id) this entry grants rights to
    private String entity;

    // the access rights granted to the entity
    private long rights;

    /** No-arg constructor, required for (de-)serialization. */
    public ACLEntry() {
    }

    /** Creates an entry granting the given rights to the given entity. */
    public ACLEntry(String userId, long rights) {
        entity = userId;
        this.rights = rights;
    }

    public String getEntity() {
        return entity;
    }

    public void setEntity(String userId) {
        entity = userId;
    }

    public long getRights() {
        return rights;
    }

    public void setRights(long rights) {
        this.rights = rights;
    }

    @Override
    public String toString() {
        return entity + "=" + rights;
    }

}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/entities/AbstractFileEntity.java b/servers/src/org/xtreemfs/mrc/brain/storage/entities/AbstractFileEntity.java
new file mode 100644
index 0000000000000000000000000000000000000000..802f5e71a8d0f014f45eb98509254de157d88e09
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/entities/AbstractFileEntity.java
@@ -0,0 +1,152 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see  for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage.entities;
+
+import java.io.Serializable;
+
+/**
+ * The abstract file entity is the base class for file and directory entities.
+ *
+ * File and directory entities are used to store such metadata about a file that
+ * is mandatory and frequently used. File metadata that is only assigned to
+ * certain files is stored in the form of meta attributes, such as a target
+ * reference in case the file is a symbolic link.
+ *
+ * The abstract file entity encapsulates all data that files and directories
+ * have in common, such as a name, POSIX timestamps or an ACL.
+ *
+ * @see FileEntity
+ * @see DirEntity
+ *
+ * @author stender
+ *
+ */
+public abstract class AbstractFileEntity implements Serializable {
+
+    private long       id;
+
+    private String     indexId;
+
+    private long       atime;
+
+    private long       ctime;
+
+    private long       mtime;
+
+    private String     userId;
+
+    private String     groupId;
+
+    private ACLEntry[] acl;
+
+    private long       linkCount;
+
+    public AbstractFileEntity() {
+    }
+
+    public AbstractFileEntity(long id, String userId, String groupId, long atime, long ctime,
+        long mtime, ACLEntry[] acl, long linkCount) {
+
+        this.id = id;
+        this.userId = userId;
+        this.groupId = groupId;
+        this.atime = atime;
+        this.ctime = ctime;
+        this.mtime = mtime;
+        this.acl = acl;
+        this.linkCount = linkCount;
+    }
+
+    public long getId() {
+        return id;
+    }
+
+    public void setId(long id) {
+        this.id = id;
+    }
+
+    public long getAtime() {
+        return atime;
+    }
+
+    public void setAtime(long atime) {
+        this.atime = atime;
+    }
+
+    public long getCtime() {
+        return ctime;
+    }
+
+    public void setCtime(long ctime) {
+        this.ctime = ctime;
+    }
+
+    public long getMtime() {
+        return mtime;
+    }
+
+    public void setMtime(long mtime) {
+        this.mtime = mtime;
+    }
+
+    public String getUserId() {
+        return userId;
+    }
+
+    public void setUserId(String userId) {
+        this.userId = userId;
+    }
+
+    public String getGroupId() {
+        return groupId;
+    }
+
+    public void setGroupId(String groupId) {
+        this.groupId = groupId;
+    }
+
+    public ACLEntry[] getAcl() {
+        return acl;
+    }
+
+    public void setAcl(ACLEntry[] acl) {
+        this.acl = acl;
+    }
+
+    public long getLinkCount() {
+        return linkCount;
+    }
+
+    public void setLinkCount(long linkCount) {
+        this.linkCount = linkCount;
+    }
+
+    public abstract boolean isDirectory();
+
+    public String toString() {
+        return (isDirectory() ? "D" : "") + id + " (" + linkCount + " links)";
+    }
+
+}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/entities/DirEntity.java b/servers/src/org/xtreemfs/mrc/brain/storage/entities/DirEntity.java
new file mode 100644
index 0000000000000000000000000000000000000000..b8ead4641584bb42abbf880686abc33357ff4d6c
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/entities/DirEntity.java
@@ -0,0 +1,67 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see  for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage.entities;
+
+import java.io.Serializable;
+
+/**
+ * A directory entity encapsulates all data needed for a directory metadata
+ * object stored in the database.
+ *
+ * @see DirEntity
+ *
+ * @author stender
+ *
+ */
+public class DirEntity extends AbstractFileEntity implements Serializable {
+
+    public DirEntity() {
+    }
+
+    public DirEntity(DirEntity entity) {
+        super(entity.getId(), entity.getUserId(), entity.getGroupId(), entity.getAtime(), entity
+                .getCtime(), entity.getMtime(), entity.getAcl(), entity.getLinkCount());
+    }
+
+    public DirEntity(long id, String userId, String groupId, long atime, long ctime, long mtime,
+        ACLEntry[] acl, long linkCount) {
+        super(id, userId, groupId, atime, ctime, mtime, acl, linkCount);
+    }
+
+    public boolean isDirectory() {
+        return true;
+    }
+
+    public void setContent(DirEntity entity) {
+        setUserId(entity.getUserId());
+        setGroupId(entity.getGroupId());
+        setAtime(entity.getAtime());
+        setCtime(entity.getCtime());
+        setMtime(entity.getMtime());
+        setAcl(entity.getAcl());
+        setLinkCount(entity.getLinkCount());
+    }
+
+}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/entities/FileAttributeEntity.java b/servers/src/org/xtreemfs/mrc/brain/storage/entities/FileAttributeEntity.java
new file mode 100644
index 0000000000000000000000000000000000000000..0ccd82eef813dd3a68f5b13db737d672646793fa
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/entities/FileAttributeEntity.java
@@ -0,0 +1,147 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+    This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+    Grid Operating System, see  for more details.
+    The XtreemOS project has been developed with the financial support of the
+    European Commission's IST program under contract #FP6-033576.
+
+    XtreemFS is free software: you can redistribute it and/or modify it under
+    the terms of the GNU General Public License as published by the Free
+    Software Foundation, either version 2 of the License, or (at your option)
+    any later version.
+
+    XtreemFS is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage.entities;
+
+import java.io.Serializable;
+
+/**
+ * This class encapsulates data of a file attribute in the database. File
+ * attributes are used to store additional data about files, such as
+ * user-defined extended metadata or other file metadata which is not stored in
+ * file or directory entities.
+ *
+ * There is a many-to-one relationship between files and file attributes; a file
+ * may have multiple attributes, and each file attribute entity belongs to a
+ * single file which is identified by the 'fileId' field.
+ *
+ * A file attribute has a key and a value. In the scope of a file, the key
+ * uniquely identifies the attribute. The value holds the attribute data.
+ *
+ * A file attribute may be either of type 'SYSTEM' or 'USER'. System attributes
+ * are used for storing file metadata that is not part of the file entity or
+ * directory entity describing the file itself, such as a reference for a
+ * symbolic link or a striping policy string. User attributes are used for
+ * storing extended user metadata, such as annotations describing the file
+ * content.
+ *
+ * @author stender
+ *
+ */
/**
 * This class encapsulates data of a file attribute in the database. File
 * attributes are used to store additional data about files, such as
 * user-defined extended metadata or other file metadata which is not stored in
 * file or directory entities.
 *
 * There is a many-to-one relationship between files and file attributes; a file
 * may have multiple attributes, and each file attribute entity belongs to a
 * single file which is identified by the 'fileId' field.
 *
 * A file attribute has a key and a value. In the scope of a file, the key
 * uniquely identifies the attribute. The value holds the attribute data.
 *
 * A file attribute may be either of type 'SYSTEM' or 'USER'. System attributes
 * are used for storing file metadata that is not part of the file entity or
 * directory entity describing the file itself, such as a reference for a
 * symbolic link or a striping policy string. User attributes are used for
 * storing extended user metadata, such as annotations describing the file
 * content.
 *
 * @param <T> the type of the attribute value
 *
 * @author stender
 */
public class FileAttributeEntity<T> implements Serializable {

    public static final int TYPE_USER   = 0;

    public static final int TYPE_SYSTEM = 1;

    // database identifier, composed of fileId, key and (for user
    // attributes) the owning user's id
    private String          id;

    private String          key;

    private T               value;

    private long            type;

    private long            fileId;

    private String          userId;

    /** Creates an uninitialized entity; required for deserialization. */
    public FileAttributeEntity() {
    }

    /** Copy constructor: initializes this entity with all values of the given one. */
    public FileAttributeEntity(FileAttributeEntity<T> entity) {
        this.id = entity.id;
        this.key = entity.key;
        this.value = entity.value;
        this.type = entity.type;
        this.fileId = entity.fileId;
        this.userId = entity.userId;
    }

    /**
     * Creates a file attribute.
     *
     * @param key attribute key, unique in the scope of the file (and user,
     *        for user attributes)
     * @param value attribute value
     * @param type either {@link #TYPE_SYSTEM} or {@link #TYPE_USER}
     * @param fileId id of the file the attribute belongs to
     * @param userId id of the user owning the attribute
     */
    public FileAttributeEntity(String key, T value, long type, long fileId,
        String userId) {
        // system attributes are file-global, so the user id is only part of
        // the database id for user attributes
        this.id = fileId + ":" + key
            + (type == TYPE_SYSTEM ? "" : (":" + userId));
        this.key = key;
        this.value = value;
        this.fileId = fileId;
        this.userId = userId;
        this.type = type;
    }

    public long getFileId() {
        return fileId;
    }

    public void setFileId(long fileId) {
        this.fileId = fileId;
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getKey() {
        return key;
    }

    public void setKey(String key) {
        this.key = key;
    }

    public long getType() {
        return type;
    }

    public void setType(long type) {
        this.type = type;
    }

    public String getUserId() {
        return userId;
    }

    public void setUserId(String userId) {
        this.userId = userId;
    }

    public void setValue(T value) {
        this.value = value;
    }

    public T getValue() {
        return value;
    }

    public String toString() {
        return "<" + key + "=" + value + ", file: " + fileId + ", type: "
            + (type == TYPE_SYSTEM ? "SYSTEM" : "USER") + ", user: " + userId
            + ">";
    }

}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/entities/FileEntity.java b/servers/src/org/xtreemfs/mrc/brain/storage/entities/FileEntity.java
new file mode 100644
index 0000000000000000000000000000000000000000..1fd71c5239928d78d62398fec10dca96086f1a20
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/entities/FileEntity.java
@@ -0,0 +1,122 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+    This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+    Grid Operating System, see  for more details.
+    The XtreemOS project has been developed with the financial support of the
+    European Commission's IST program under contract #FP6-033576.
+
+    XtreemFS is free software: you can redistribute it and/or modify it under
+    the terms of the GNU General Public License as published by the Free
+    Software Foundation, either version 2 of the License, or (at your option)
+    any later version.
+
+    XtreemFS is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage.entities;
+
+import java.io.Serializable;
+
+/**
+ * A file entity encapsulates all data needed for a file metadata object stored
+ * in the database. Unlike a {@link DirEntity}, a file entity has a size and a
+ * set of replicas.
+ *
+ * @see DirEntity
+ *
+ * @author stender
+ *
+ */
+public class FileEntity extends AbstractFileEntity implements Serializable {
+
+    private long           size;
+
+    private XLocationsList xLocationsList;
+
+    private long           epoch;
+
+    private long           issuedEpoch;
+
+    public FileEntity() {
+    }
+
+    public FileEntity(FileEntity entity) {
+        this(entity.getId(), entity.getUserId(), entity.getGroupId(), entity
+                .getAtime(), entity.getCtime(), entity.getMtime(), entity
+                .getSize(), entity.getXLocationsList(), entity.getAcl(), entity
+                .getLinkCount(), entity.getEpoch(), entity
+                .getIssuedEpoch());
+    }
+
+    public FileEntity(long id, String userId, String groupId, long atime,
+        long ctime, long mtime, long size, XLocationsList replicas,
+        ACLEntry[] acl, long linkCount, long writeEpoch, long truncateEpoch) {
+
+        super(id, userId, groupId, atime, ctime, mtime, acl, linkCount);
+
+        this.size = size;
+        this.xLocationsList = replicas;
+        this.epoch = writeEpoch;
+        this.issuedEpoch = truncateEpoch;
+    }
+
+    public XLocationsList getXLocationsList() {
+        return xLocationsList;
+    }
+
+    public void setXLocationsList(XLocationsList replicas) {
+        this.xLocationsList = replicas;
+    }
+
+    public long getSize() {
+        return size;
+    }
+
+    public void setSize(long size) {
+        this.size = size;
+    }
+
+    public long getEpoch() {
+        return epoch;
+    }
+
+    public void setEpoch(long currentEpoch) {
+        this.epoch = currentEpoch;
+    }
+
+    public long getIssuedEpoch() {
+        return issuedEpoch;
+    }
+
+    public void setIssuedEpoch(long issuedEpoch) {
+        this.issuedEpoch = issuedEpoch;
+    }
+
+    public boolean isDirectory() {
+        return false;
+    }
+
+    public void setContent(FileEntity entity) {
+        setUserId(entity.getUserId());
+        setGroupId(entity.getGroupId());
+        setAtime(entity.getAtime());
+        setCtime(entity.getCtime());
+        setMtime(entity.getMtime());
+        setSize(entity.getSize());
+        setXLocationsList(entity.getXLocationsList());
+        setAcl(entity.getAcl());
+        setLinkCount(entity.getLinkCount());
+        setEpoch(entity.getEpoch());
+        setIssuedEpoch(entity.getIssuedEpoch());
+    }
+
+}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/entities/SliceEntity.java b/servers/src/org/xtreemfs/mrc/brain/storage/entities/SliceEntity.java
new file mode 100644
index 0000000000000000000000000000000000000000..03e1bee9c4174881da8593cb2d9f9ccb3a40adbd
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/entities/SliceEntity.java
@@ -0,0 +1,68 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+    This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+    Grid Operating System, see  for more details.
+    The XtreemOS project has been developed with the financial support of the
+    European Commission's IST program under contract #FP6-033576.
+
+    XtreemFS is free software: you can redistribute it and/or modify it under
+    the terms of the GNU General Public License as published by the Free
+    Software Foundation, either version 2 of the License, or (at your option)
+    any later version.
+
+    XtreemFS is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage.entities;
+
/**
 * A slice entity describes a lexicographic range of path names
 * [rangeStart, rangeEnd) managed by a single slice.
 *
 * @author stender
 */
public class SliceEntity {

    private long id;

    /** inclusive lower bound of the path range ("" means no lower bound) */
    public String rangeStart;

    /** exclusive upper bound of the path range; null means unbounded */
    public String rangeEnd;

    /** Creates a slice covering the entire path name space. */
    public SliceEntity() {
        this.rangeStart = "";
        this.rangeEnd = null;
    }

    public SliceEntity(long id, String rangeStart, String rangeEnd) {
        this.id = id;
        this.rangeStart = rangeStart;
        this.rangeEnd = rangeEnd;
    }

    /**
     * Checks whether the given path falls into this slice's range.
     *
     * @param path the path to check
     * @return true if rangeStart precedes or equals the path and the path
     *         precedes rangeEnd (a null rangeEnd never bounds the range)
     */
    public boolean isIn(String path) {
        // idiom fix: 'cond ? true : x' replaced with a plain disjunction
        return rangeStart.compareTo(path) <= 0
                && (rangeEnd == null || rangeEnd.compareTo(path) > 0);
    }

    /**
     * Splits this slice at the given separator into two adjacent slices.
     *
     * NOTE(review): not implemented yet — currently returns an array of two
     * null elements.
     */
    public SliceEntity[] split(String separator) {
        // TODO: populate with [rangeStart, separator) and [separator, rangeEnd)

        SliceEntity[] slices = new SliceEntity[2];
        // slices[0] = rangeStart, separator;
        // slices[1] = separator, rangeEnd;
        return slices;
    }

    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/entities/StripingPolicy.java b/servers/src/org/xtreemfs/mrc/brain/storage/entities/StripingPolicy.java
new file mode 100644
index 0000000000000000000000000000000000000000..d821eb3af87b1ab940f4af3a5ee6f82003d0ec96
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/entities/StripingPolicy.java
@@ -0,0 +1,84 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+    This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+    Grid Operating System, see  for more details.
+    The XtreemOS project has been developed with the financial support of the
+    European Commission's IST program under contract #FP6-033576.
+
+    XtreemFS is free software: you can redistribute it and/or modify it under
+    the terms of the GNU General Public License as published by the Free
+    Software Foundation, either version 2 of the License, or (at your option)
+    any later version.
+
+    XtreemFS is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage.entities;
+
+import java.io.Serializable;
+
+public class StripingPolicy implements Serializable {
+
+    private String policy;
+
+    private long   stripeSize;
+
+    private long   width;
+
+    public StripingPolicy() {
+    }
+
+    public StripingPolicy(String policy, long stripeSize, long width) {
+        this.policy = policy;
+        this.stripeSize = stripeSize;
+        this.width = width;
+    }
+
+    public String getPolicy() {
+        return policy;
+    }
+
+    public void setPolicy(String policy) {
+        this.policy = policy;
+    }
+
+    public long getStripeSize() {
+        return stripeSize;
+    }
+
+    public void setStripeSize(long stripeSize) {
+        this.stripeSize = stripeSize;
+    }
+
+    public long getWidth() {
+        return width;
+    }
+
+    public void setWidth(long width) {
+        this.width = width;
+    }
+
+    public boolean equals(Object obj) {
+
+        if (!(obj instanceof StripingPolicy))
+            return false;
+
+        StripingPolicy sp = (StripingPolicy) obj;
+        return policy.equals(sp.policy) && stripeSize == sp.stripeSize
+            && width == sp.width;
+    }
+
+    public String toString() {
+        return policy + ", " + stripeSize + ", " + width;
+    }
+
+}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/entities/XLocation.java b/servers/src/org/xtreemfs/mrc/brain/storage/entities/XLocation.java
new file mode 100644
index 0000000000000000000000000000000000000000..47a77ca0c573a1af96eadfd6f72a5f4a1e13fde2
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/entities/XLocation.java
@@ -0,0 +1,78 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+    This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+    Grid Operating System, see  for more details.
+    The XtreemOS project has been developed with the financial support of the
+    European Commission's IST program under contract #FP6-033576.
+
+    XtreemFS is free software: you can redistribute it and/or modify it under
+    the terms of the GNU General Public License as published by the Free
+    Software Foundation, either version 2 of the License, or (at your option)
+    any later version.
+
+    XtreemFS is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage.entities;
+
+import java.io.Serializable;
+
+public class XLocation implements Serializable {
+
+    private StripingPolicy stripingPolicy;
+
+    private String[] osdList;
+
+    public XLocation() {
+    }
+
+    public XLocation(StripingPolicy stripingPolicy, String[] osdList) {
+        this.stripingPolicy = stripingPolicy;
+        this.osdList = osdList;
+    }
+
+    public String[] getOsdList() {
+        return osdList;
+    }
+
+    public void setOsdList(String[] osdList) {
+        this.osdList = osdList;
+    }
+
+    public StripingPolicy getStripingPolicy() {
+        return stripingPolicy;
+    }
+
+    public void setStripingPolicy(StripingPolicy stripingPolicy) {
+        this.stripingPolicy = stripingPolicy;
+    }
+
+    public boolean equals(Object obj) {
+
+        if(!(obj instanceof XLocation))
+            return false;
+
+        XLocation xloc = (XLocation) obj;
+        if(!stripingPolicy.equals(xloc.stripingPolicy))
+            return false;
+
+        if(osdList.length != xloc.osdList.length)
+            return false;
+
+        for(int i = 0; i < osdList.length; i++)
+            if(!osdList[i].equals(xloc.osdList[i]))
+                return false;
+
+        return true;
+    }
+
+}
diff --git a/servers/src/org/xtreemfs/mrc/brain/storage/entities/XLocationsList.java b/servers/src/org/xtreemfs/mrc/brain/storage/entities/XLocationsList.java
new file mode 100644
index 0000000000000000000000000000000000000000..90eb28f1542d3e602f074076d257f5b526004707
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/brain/storage/entities/XLocationsList.java
@@ -0,0 +1,104 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see  for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.brain.storage.entities;
+
+import java.io.Serializable;
+import java.util.LinkedList;
+import java.util.List;
+
+public class XLocationsList implements Serializable {
+
+    private XLocation[] replicas;
+
+    private long        version;
+
+    public XLocationsList() {
+    }
+
+    public XLocationsList(XLocation[] replicas, long version) {
+        this.replicas = replicas;
+        this.version = version;
+    }
+
+    public XLocation[] getReplicas() {
+        return replicas;
+    }
+
+    public void setReplicas(XLocation[] replicas) {
+        this.replicas = replicas;
+    }
+
+    public long getVersion() {
+        return version;
+    }
+
+    public void setVersion(long version) {
+        this.version = version;
+    }
+
+    public void addReplica(XLocation replica) {
+
+        // set the new X-Locations list
+        if (this.replicas != null) {
+            XLocation[] replicas = new XLocation[this.replicas.length + 1];
+            System.arraycopy(this.replicas, 0, replicas, 0, this.replicas.length);
+            replicas[this.replicas.length] = replica;
+            this.replicas = replicas;
+        } else
+            this.replicas = new XLocation[] { replica };
+
+        // increment the version
+        version++;
+    }
+
+    public void removeReplica(XLocation replica) {
+
+        if (replica == null)
+            return;
+
+        List newRepls = new LinkedList();
+        for (int i = 0; i < replicas.length; i++)
+            if (!replicas[i].equals(replica))
+                newRepls.add(replicas[i]);
+
+        if (newRepls.size() != replicas.length) {
+            replicas = newRepls.toArray(new XLocation[newRepls.size()]);
+            version++;
+        }
+    }
+
+    public void addReplicaWithoutVersionChange(XLocation replica) {
+
+        // set the new X-Locations list
+        if (this.replicas != null) {
+            XLocation[] replicas = new XLocation[this.replicas.length + 1];
+            System.arraycopy(this.replicas, 0, replicas, 0, this.replicas.length);
+            replicas[this.replicas.length] = replica;
+            this.replicas = replicas;
+        } else
+            this.replicas = new XLocation[] { replica };
+    }
+
+}
diff --git a/servers/src/org/xtreemfs/mrc/osdselection/AbstractSelectionPolicy.java b/servers/src/org/xtreemfs/mrc/osdselection/AbstractSelectionPolicy.java
new file mode 100644
index 0000000000000000000000000000000000000000..fc68a9ddb2c54f93ae82dd0daefa6e8e6338cc30
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/osdselection/AbstractSelectionPolicy.java
@@ -0,0 +1,212 @@
+/*  Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+    This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+    Grid Operating System, see  for more details.
+    The XtreemOS project has been developed with the financial support of the
+    European Commission's IST program under contract #FP6-033576.
+
+    XtreemFS is free software: you can redistribute it and/or modify it under
+    the terms of the GNU General Public License as published by the Free
+    Software Foundation, either version 2 of the License, or (at your option)
+    any later version.
+
+    XtreemFS is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.mrc.osdselection;
+
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.xtreemfs.common.TimeSync;
+import org.xtreemfs.common.logging.Logging;
+import org.xtreemfs.common.uuids.ServiceUUID;
+import org.xtreemfs.common.uuids.UnknownUUIDException;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.json.JSONString;
+
+public abstract class AbstractSelectionPolicy implements OSDSelectionPolicy {
+    
+    static final long OSD_TIMEOUT_SPAN  = 600;             // 10 min
+                                                            
+    static final long MIN_FREE_CAPACITY = 32 * 1024 * 1024; // 32 mb
+                                                            
    /**
     * Filters the given map of registered OSDs down to those that are
     * currently usable for file placement.
     *
     * An OSD is kept if it (a) matches the optional rule list given in
     * 'args', (b) reports sufficient free capacity and (c) has not timed
     * out (see hasTimedOut).
     *
     * NOTE(review): the generic type parameters of the Map/Set/List types
     * in this method appear to have been lost in extraction (e.g. "Map>");
     * restore them from the original source before compiling.
     *
     * @param osds map of OSD UUIDs to their status maps, as registered
     *        with the directory service
     * @param args optional JSON array of selection rules; if null, every
     *        OSD passes the rule check
     * @return the filtered map of usable OSDs (possibly empty)
     */
    public Map> getUsableOSDs(Map> osds,
        String args) {
        
        // parse the optional JSON rule list; on malformed input, log a
        // warning and proceed as if no rules were given
        Set ruleList = null;
        if (args != null) {
            try {              
                ruleList = new HashSet((List) JSONParser.parseJSON(new JSONString(
                    args)));
            } catch (Exception exc) {
                Logging.logMessage(Logging.LEVEL_WARN, this, "invalid set of suitable OSDs: "
                    + args);
            }
        }
        
        Map> suitable = new HashMap>();
        for (String uuid : osds.keySet()) {
            
            try {
                Map osd = osds.get(uuid);
                // keep the OSD if it follows the rules (or none are set),
                // has free capacity and has not timed out
                if ((ruleList == null || follows(uuid,ruleList))
                    && (hasFreeCapacity(osd) && !hasTimedOut(osd)))
                    suitable.put(uuid, osd);
                
            // a missing or incomplete status entry simply disqualifies
            // the OSD instead of aborting the whole scan
            } catch (NullPointerException e) {
                continue;
            }
        }
        return suitable;
    }
+    
+    static boolean hasFreeCapacity(Map osd) {
+        Long free = Long.parseLong((String) osd.get("free"));
+        return free > MIN_FREE_CAPACITY;
+    }
+    
+    /*
+    static String getLocation(Map osd) {
+        String location = (String) osd.get("location");
+        return location;
+    }*/
+    
    /**
     * Checks whether an OSD must be considered unavailable because it has
     * not reported to the directory service recently.
     *
     * @param osd status map of a registered OSD; may contain a
     *        "lastUpdated" key holding a timestamp as a decimal string
     * @return true if the last update is older than OSD_TIMEOUT_SPAN, or
     *         if no update timestamp is known at all
     */
    static boolean hasTimedOut(Map osd) {
        
        // if the OSD has contacted the DS within the last 10 minutes,
        // assume that it is still running
        if (osd.containsKey("lastUpdated")) {
            long updateTimestamp = Long.parseLong((String) osd.get("lastUpdated"));
            // presumably converts the global time from milliseconds to
            // seconds to match the registry's resolution — TODO confirm
            long currentTime = TimeSync.getGlobalTime() / 1000;
            return currentTime - updateTimestamp > OSD_TIMEOUT_SPAN;
        } else
            return true;
    }
+    
+    /**
+     * Conventions for rule prefixes:
+ *

+ * * means the following rule matches a URL.
+ * ! means that the following rule must not be matched.

+ *

Constellations like *! as prefix are not allowed.
+ * Every rule must be an identifier for at least one potential OSD.
+ * At least one rule must be followed.
+ * A not rule (!) is more important then any other rule.

+ * + * @param ruleList - rules the OSD must follow. + * @param UUID - of the OSD. + * @return true, if the OSD with UUID follows the ruleList.
+ * False is also returned, if the UUID is unknown. + */ + static private boolean follows(String UUID,Set ruleList) { + boolean follows = false; + + try{ + for (String rule : ruleList){ + if (rule.startsWith("!")){ + if(matches(UUID,rule.substring(1))) return false; + }else{ + follows |= matches(UUID,rule); + } + } + }catch (UnknownUUIDException ue) { + return false; + } + + return follows; + } + + /** + * + * @param UUID - of the OSD. + * @param pattern - pattern that must be match. + * @return true, if the the OSD with UUID matches the rule. + * @throws UnknownUUIDException + */ + private static boolean matches(String UUID, String pattern) throws UnknownUUIDException { + if (pattern.startsWith("*")){ + String cleanPattern = pattern.substring(1); + + // get the address of the UUID + ServiceUUID osd = new ServiceUUID(UUID); + osd.resolve(); + InetSocketAddress inetAddressOSD = osd.getAddress(); + + if (cleanPattern.equals(inetAddressOSD.getHostName())) return true; +// else if (cleanPattern.equals(inetAddressOSD.getHostString())) return true; + //match subNet-String + else{ + String patternIP = cleanPattern; + if (patternIP.startsWith("http://")) patternIP = patternIP.substring(7); + else if (patternIP.startsWith("https://")) patternIP = patternIP.substring(8); + + String[] patternPortIP = patternIP.split(":"); + int patternPort = 0; + if (patternPortIP.length>=2){ + try{ + patternPort = Integer.valueOf(patternPortIP[1].split("/")[0]); + }catch (NumberFormatException f){ + Logging.logMessage(Logging.LEVEL_WARN,null,"'"+pattern+"' port is not valid."); + } + } + + String[] patternIPv4 = patternPortIP[0].split("."); + if (patternIPv4.length==4){ + String[] osdIPv4 = inetAddressOSD.getAddress().getHostAddress().split("."); + if (patternPort!=0 && patternPort!=inetAddressOSD.getPort()) return false; + + // compare the IP strings 0 is a wildcard, but not if at a later position !=0 + try{ + boolean wasNotZero = false; + for (int i=3;i<=0;i--){ + if (wasNotZero && 
(Integer.parseInt(osdIPv4[i]) != Integer.parseInt(patternIPv4[i]))){ + return false; + }else{ + if (Integer.parseInt(patternIPv4[i])!=0){ + wasNotZero = true; + if ((Integer.parseInt(osdIPv4[i]) != Integer.parseInt(patternIPv4[i]))) return false; + } + } + } + return true; + }catch(NumberFormatException e){ + // if not compatible go ignore and jump to the next step + } + } + + try { + URI patternURI = new URI(cleanPattern); + patternPort = patternURI.getPort(); + String patternHost = patternURI.getHost(); + if (patternHost==null) throw new URISyntaxException(patternURI.toASCIIString(),"unknown host"); + + // check the port + if (patternPort!=0 && patternPort!=inetAddressOSD.getPort()) return false; + + return true; + } catch (URISyntaxException e) { + Logging.logMessage(Logging.LEVEL_WARN,null,"'"+pattern+"' is not a valid identifier for an osd or osd-range and will be ignored."); + } + } + }else{ + return UUID.equals(pattern); + } + return false; + } +} diff --git a/servers/src/org/xtreemfs/mrc/osdselection/OSDSelectionPolicy.java b/servers/src/org/xtreemfs/mrc/osdselection/OSDSelectionPolicy.java new file mode 100644 index 0000000000000000000000000000000000000000..d0f75871e7048667d0d4266ff64787b696302e29 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/osdselection/OSDSelectionPolicy.java @@ -0,0 +1,58 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.osdselection; + +import java.net.InetAddress; +import java.util.Map; + +/** + * Interface for policies implementing a selection mechanism for OSDs. + * + * @author bjko, stender + */ +public interface OSDSelectionPolicy { + + /** + * Returns the subset of all registered OSDs that match the policy. + * + * @param osds + * osds is a map containing osd info registered with the + * directory service + * @return the filtered list + */ + public Map> getUsableOSDs( + Map> osds, String args); + + /** + * Returns a list of OSDs to be allocated to a newly created file. + * + * @param osdMap + * list of osds that match the policy + * @return a list of osds that can be used to create a new file + */ + public String[] getOSDsForNewFile(Map> osdMap, + InetAddress clientAddr, int amount, String args); + +} diff --git a/servers/src/org/xtreemfs/mrc/osdselection/OSDStatusManager.java b/servers/src/org/xtreemfs/mrc/osdselection/OSDStatusManager.java new file mode 100644 index 0000000000000000000000000000000000000000..5994507ccbfb4b92e9ad13d3b8996e6486d9095e --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/osdselection/OSDStatusManager.java @@ -0,0 +1,404 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.osdselection; + +import java.io.IOException; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.Map; + +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.RPCResponseListener; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.LifeCycleThread; +import org.xtreemfs.mrc.MRCConfig; +import org.xtreemfs.mrc.PolicyContainer; +import org.xtreemfs.mrc.brain.VolumeChangeListener; +import org.xtreemfs.mrc.slices.VolumeInfo; + +/** + * Checks regularly for suitable OSDs for each volume. + * + * @author bjko + */ +public class OSDStatusManager extends LifeCycleThread implements VolumeChangeListener, + RPCResponseListener { + + /** + * Volume and policy record. + */ + public static class VolumeOSDs { + /** + * volume ID + */ + public String volID; + + /** + * policyID used by the volume. + */ + public long selectionPolicyID; + + /** + * OSD policy arguments used by the volume + */ + public String selectionPolicyArgs; + + /** + * Map of suitable OSDs for that volume. Can be empty. + */ + public Map> usableOSDs; + } + + /** + * Policies available in the system. 
+ */ + private static final Map policies = new HashMap(); + + /** + * Interval in ms to wait between two checks. + */ + private int checkIntervalMillis = 1000 * 5; + + /** + * A list of volumes registered with the thread. + */ + private final Map volumeMap; + + /** + * The latest set of all known OSDs fetched from the Directory Service. + */ + private Map> knownOSDs; + + /** + * An client used to send requests to the Directory Service. + */ + private final DIRClient client; + + /** + * Thread shuts down if true. + */ + private boolean quit = false; + + /** + * Enables debugging output. + */ + private boolean debug = false; + + /** + * The configuration for the component. + */ + private MRCConfig config; + + private RPCResponse>> pendingResponse; + + private final String authString; + + private final PolicyContainer policyContainer; + + static { + policies.put(RandomSelectionPolicy.POLICY_ID, new RandomSelectionPolicy()); + policies.put(ProximitySelectionPolicy.POLICY_ID, new ProximitySelectionPolicy()); + } + + /** + * Creates a new instance of OSDStatusManager + * + * @param client + * a DIRClient used for contacting the direcotory service + */ + public OSDStatusManager(MRCConfig config, DIRClient client, PolicyContainer policyContainer, + String authString) throws IOException { + + super("OSDStatusManager"); + + this.policyContainer = policyContainer; + this.config = config; + this.authString = authString; + + volumeMap = new HashMap(); + knownOSDs = new HashMap>(); + + this.client = client; + + int interval = config.getOsdCheckInterval(); + checkIntervalMillis = 1000 * interval; + } + + public void volumeChanged(int mod, VolumeInfo volume) { + + switch (mod) { + + case VolumeChangeListener.MOD_CHANGED: + + synchronized (this) { + + final String volId = volume.getId(); + VolumeOSDs vol = volumeMap.get(volId); + + if (vol == null) { + + vol = new VolumeOSDs(); + vol.volID = volume.getId(); + vol.selectionPolicyID = volume.getOsdPolicyId(); + 
vol.selectionPolicyArgs = volume.getOsdPolicyArgs(); + vol.usableOSDs = new HashMap>(); + + volumeMap.put(volId, vol); + + } else { + vol.selectionPolicyID = volume.getOsdPolicyId(); + vol.selectionPolicyArgs = volume.getOsdPolicyArgs(); + vol.usableOSDs.clear(); + } + + this.notifyAll(); + } + + break; + + case VolumeChangeListener.MOD_DELETED: + + synchronized (this) { + volumeMap.remove(volume.getId()); + } + + break; + } + } + + /** + * Shuts down the thread. + */ + public void shutdown() { + synchronized (this) { + quit = true; + this.notifyAll(); + } + } + + /** + * Main loop. + */ + public void run() { + + // initially fetch the list of OSDs from the Directory Service + RPCResponse>> r = null; + try { + r = client.getEntities(RPCClient.generateMap("type", "OSD"), new LinkedList(), + authString); + knownOSDs = r.get(); + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + this.notifyCrashed(exc); + } finally { + if (r != null) + r.freeBuffers(); + } + + if (Logging.isInfo()) + Logging.logMessage(Logging.LEVEL_INFO, this, "OSD status manager operational, using " + + config.getDirectoryService()); + + notifyStarted(); + + while (!quit) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "sending request..."); + + if (pendingResponse == null) { + + try { + // request list of registered OSDs from Directory + // Service + + pendingResponse = client.getEntities(RPCClient.generateMap("type", "OSD"), + new LinkedList(), authString); + + pendingResponse.setResponseListener(this); + + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + } + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "request sent..."); + + synchronized (this) { + try { + this.wait(checkIntervalMillis); + } catch (InterruptedException ex) { + } + } + + } else + try { + Thread.sleep(checkIntervalMillis / 2); + } catch (InterruptedException ex) { + } + } + Logging.logMessage(Logging.LEVEL_INFO, this, "shutdown complete"); + notifyStopped(); + + 
} + + /** + * Returns the list of usable OSDs for the given volume id. + * + * @param volumeId + * the volume id + * @return a list of feasible OSDs. Each list entry contains a mapping from + * keys to values which describes a certain OSD + */ + public synchronized Map> getUsableOSDs(String volumeId) { + + VolumeOSDs vol = volumeMap.get(volumeId); + if (vol == null) { + Logging.logMessage(Logging.LEVEL_WARN, this, + "no volume registered at OSDStatusManager with ID '" + volumeId + "'"); + return null; + } + + // if no OSDs are assigned to the current volume, re-calculate the set + // of feasible OSDs from the last set of OSDs received from the + // Directory Service + if (vol.usableOSDs.size() == 0) { + OSDSelectionPolicy policy = getOSDSelectionPolicy(Long.valueOf(vol.selectionPolicyID)); + if (policy != null) { + if (knownOSDs != null) + vol.usableOSDs = policy.getUsableOSDs(knownOSDs, vol.selectionPolicyArgs); + else + Logging.logMessage(Logging.LEVEL_WARN, this, + "could not calculate set of feasible OSDs for volume '" + vol.volID + + "': haven't yet received an OSD list from Directory Service!"); + + } else + Logging.logMessage(Logging.LEVEL_WARN, this, + "could not calculate set of feasible OSDs for volume '" + vol.volID + + "': no assignment policy available!"); + } + + return vol.usableOSDs; + + } + + public Map getCurrentStatus() { + Map map = new HashMap(); + for (String volID : volumeMap.keySet()) { + map.put(volID, volumeMap.get(volID).usableOSDs); + } + return map; + } + + public synchronized void responseAvailable(RPCResponse response) { + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "response..."); + + try { + + knownOSDs = (Map>) response.get(); + assert (knownOSDs != null); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "registered OSDs"); + if (knownOSDs.size() == 0) + Logging.logMessage(Logging.LEVEL_WARN, this, + "there are currently no OSDs available"); + for (String uuid : knownOSDs.keySet()) { + Logging.logMessage(Logging.LEVEL_DEBUG, 
this, uuid + " = " + knownOSDs.get(uuid)); + } + + for (VolumeOSDs vol : volumeMap.values()) { + OSDSelectionPolicy policy = getOSDSelectionPolicy(vol.selectionPolicyID); + if (policy != null) { + vol.usableOSDs = policy.getUsableOSDs(knownOSDs, vol.selectionPolicyArgs); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "OSDs for " + vol.volID); + if (vol.usableOSDs != null) + for (String uuid : vol.usableOSDs.keySet()) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, " " + uuid + " = " + + knownOSDs.get(uuid)); + } + + } else { + Logging.logMessage(Logging.LEVEL_WARN, this, "policy ID " + + vol.selectionPolicyID + " selected for volume ID " + vol.volID + + " does not exist!"); + } + } + + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, + "Cannot contact Directory Service. Reason: " + ex); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, + "Cannot contact Directory Service. Reason: " + ex); + } finally { + response.freeBuffers(); + } + + pendingResponse = null; + } + + public OSDSelectionPolicy getOSDSelectionPolicy(long policyId) { + + OSDSelectionPolicy policy = policies.get(policyId); + + // if the policy is not built-in, try to load it from the plug-in + // directory + if (policy == null) { + try { + policy = policyContainer.getOSDSelectionPolicy(policyId); + policies.put(policyId, policy); + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_WARN, this, + "could not load OSDSelectionPolicy with ID " + policyId); + Logging.logMessage(Logging.LEVEL_WARN, this, exc); + } + } + + return policy; + } + + /** + * Returns the approximate amount of free space in the given volume. 
+ * + * @param volumeId + * the ID of the volume + * + * @return the approximate number of free bytes in the volume + * + */ + public long getFreeSpace(VolumeInfo volume) { + + long free = 0; + + Map> usableOSDs = getUsableOSDs(volume.getId()); + if (usableOSDs == null) + return 0; + + for (Map entry : usableOSDs.values()) + free += Long.valueOf((String) entry.get("free")); + return free; + } +} diff --git a/servers/src/org/xtreemfs/mrc/osdselection/ProximitySelectionPolicy.java b/servers/src/org/xtreemfs/mrc/osdselection/ProximitySelectionPolicy.java new file mode 100644 index 0000000000000000000000000000000000000000..04719bdea7ee3f0b68150da80c5875b4af4ebd57 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/osdselection/ProximitySelectionPolicy.java @@ -0,0 +1,138 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Nele Andersen (ZIB) + */ + +package org.xtreemfs.mrc.osdselection; + +import java.net.InetAddress; +import java.util.LinkedList; +import java.util.PriorityQueue; + +import java.util.Map; +import java.net.URI; +import java.net.UnknownHostException; + +public class ProximitySelectionPolicy extends AbstractSelectionPolicy{ + + public static final long POLICY_ID = 2; + + private byte[] clientAddress; + private long clientAddressLong; + + public String[] getOSDsForNewFile(Map> osdMap, + InetAddress clientAddress, int amount, String args) { + + this.clientAddress = clientAddress.getAddress(); + clientAddressLong = inetAddressToLong(this.clientAddress); + + // sort all OSDs with sufficient free capacity according to the value + // returned by the method distance + String[] osds = new String[amount]; + PriorityQueue queue = new PriorityQueue(); + LinkedList list = new LinkedList(); + + for (String osd : osdMap.keySet()) { + if (hasFreeCapacity(osdMap.get(osd))) { + try { + queue.add(new Pair(osd, distance(osdMap.get(osd)))); + } catch (UnknownHostException e) { + } + } + } + + for (int i = 0; !queue.isEmpty() + && (queue.peek().getDistance() == 0 || i < amount); i++) + list.add(queue.poll().getOsd()); + + for (int i = 0; !list.isEmpty() && i < amount; i++) + osds[i] = list.remove((int) (Math.random() * list.size())); + + return osds; + + } + + private long distance(Map osd) throws UnknownHostException { + + byte[] osdAddress = InetAddress.getByName( + (URI.create((String) osd.get("uri")).getHost())).getAddress(); + + // if osd in same subnet as client + if (osdAddress[0] == clientAddress[0] + && osdAddress[1] == clientAddress[1] + && osdAddress[2] == clientAddress[2]) + return 0; + + return Math.abs(inetAddressToLong(osdAddress) - clientAddressLong); + } + + public long inetAddressToLong(byte[] address) { + + StringBuffer sb = new StringBuffer(); + + for (int i = 0; i < address.length; i++) { + + if (address[i] < 0) + sb.append(256 + address[i]); + 
else if (address[i] < 10) + sb.append("00" + address[i]); + else if (address[i] < 100) + sb.append("0" + address[i]); + else + sb.append(address[i]); + } + return Long.parseLong(sb.toString()); + } + + class Pair implements Comparable { + + private String osd; + private long distance; + + Pair(String osd, long distance) { + this.osd = osd; + this.distance = distance; + } + + public String toString() { + return "(" + osd + ", " + distance + ")"; + } + + public long getDistance() { + return distance; + } + + public String getOsd() { + return osd; + } + + public int compareTo(Pair other) { + if (this.distance < other.distance) + return -1; + if (this.distance > other.distance) + return 1; + else + return 0; + } + } + +} diff --git a/servers/src/org/xtreemfs/mrc/osdselection/RandomSelectionPolicy.java b/servers/src/org/xtreemfs/mrc/osdselection/RandomSelectionPolicy.java new file mode 100644 index 0000000000000000000000000000000000000000..0e550e534cbb77dfbdeee48882443fb3adac11bc --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/osdselection/RandomSelectionPolicy.java @@ -0,0 +1,63 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.osdselection; + +import java.net.InetAddress; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +/** + * A very simple policy that accepts all osds! + * + * @author bjko + */ +public class RandomSelectionPolicy extends AbstractSelectionPolicy{ + + public static final long POLICY_ID = 1; + + /** Creates a new instance of SimpleSelectionPolicy */ + public RandomSelectionPolicy() { + } + + public String[] getOSDsForNewFile(Map> osdMap, + InetAddress clientAddress, int amount, String args) { + + // first, sort out all OSDs with insufficient free capacity + String[] osds = new String[amount]; + List list = new LinkedList(); + for (String osd : osdMap.keySet()) { + if (hasFreeCapacity(osdMap.get(osd))) + list.add(osd); + } + + // from the remaining set, take a random subset of OSDs + for (int i = 0; i < amount; i++) + osds[i] = list.remove((int) (Math.random() * list.size())); + + return osds; + } + +} diff --git a/servers/src/org/xtreemfs/mrc/replication/MasterReplicationMechanism.java b/servers/src/org/xtreemfs/mrc/replication/MasterReplicationMechanism.java new file mode 100644 index 0000000000000000000000000000000000000000..dc4b1b89b2fe672a0aa5d030bb3d1f728ba475cb --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/replication/MasterReplicationMechanism.java @@ -0,0 +1,69 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.replication; + +import java.net.InetSocketAddress; +import org.xtreemfs.mrc.slices.SliceInfo; + +/** + * + * @author bjko + */ +public class MasterReplicationMechanism implements ReplicationMechanism { + + private SliceInfo slice; + + public InetSocketAddress[] slaves; + + private boolean waitForAck; + + /** Creates a new instance of MasterReplicationMechanism. 
+ Does not wait for ack from slaves.*/ + public MasterReplicationMechanism(InetSocketAddress[] slaves) { + this(slaves,false); + } + + /** Creates a new instance of MasterReplicationMechanism */ + public MasterReplicationMechanism(InetSocketAddress[] slaves, boolean waitForAck) { + this.slaves = slaves; + this.waitForAck = waitForAck; + } + + public boolean sendResponseAfterReplication() { + return waitForAck; + } + + public void registerSlice(SliceInfo slice) { + this.slice = slice; + this.slice.setReplicating(true); + } + + public SliceInfo getSlice() { + return this.slice; + } + + + +} diff --git a/servers/src/org/xtreemfs/mrc/replication/NullReplicationMechanism.java b/servers/src/org/xtreemfs/mrc/replication/NullReplicationMechanism.java new file mode 100644 index 0000000000000000000000000000000000000000..bc57b0b2b88c970d719a309bf9598a1b2fc9a886 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/replication/NullReplicationMechanism.java @@ -0,0 +1,57 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.replication; + +import org.xtreemfs.mrc.MRCRequest; +import org.xtreemfs.mrc.brain.storage.LogEntry; +import org.xtreemfs.mrc.brain.storage.SliceID; +import org.xtreemfs.mrc.slices.SliceInfo; + +/** + * + * @author bjko + */ +public class NullReplicationMechanism implements ReplicationMechanism { + + private SliceInfo slice; + + /** Creates a new instance of NullReplicationMechanism */ + public NullReplicationMechanism() { + } + + public boolean sendResponseAfterReplication() { + return false; + } + + public void registerSlice(SliceInfo slice) { + this.slice = slice; + this.slice.setReplicating(true); + } + + public SliceInfo getSlice() { + return this.slice; + } + +} diff --git a/servers/src/org/xtreemfs/mrc/replication/ReplicationInfo.java b/servers/src/org/xtreemfs/mrc/replication/ReplicationInfo.java new file mode 100644 index 0000000000000000000000000000000000000000..f0aea5dea1d1ee41e5862957a306cbd4799fa6fb --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/replication/ReplicationInfo.java @@ -0,0 +1,57 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.replication; + +import java.io.Serializable; + +/** + * + * @author bjko + */ +public class ReplicationInfo implements Serializable { + + public ReplicationMechanism mechanism; + + public int currentViewID; + + public int nextSequenceID; + + /** This is the sequence ID of the first + * (oldest) log entry available. + */ + public int lastAvailSqID; + + /** Creates a new instance of ReplicationInfo */ + public ReplicationInfo() { + currentViewID = 1; + nextSequenceID = 1; + lastAvailSqID = 1; + } + + public int getNextSequenceID() { + return this.nextSequenceID++; + } + +} diff --git a/servers/src/org/xtreemfs/mrc/replication/ReplicationManager.java b/servers/src/org/xtreemfs/mrc/replication/ReplicationManager.java new file mode 100644 index 0000000000000000000000000000000000000000..42a1f6d7184c5d184301f52c281404b8659fd2f8 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/replication/ReplicationManager.java @@ -0,0 +1,1166 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.replication; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.CharBuffer; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.LinkedBlockingQueue; + +import org.xtreemfs.common.VersionManagement; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.HttpErrorException; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.mrc.MRCClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.foundation.LifeCycleThread; +import org.xtreemfs.foundation.json.JSONCharBufferString; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.mrc.MRCConfig; +import org.xtreemfs.mrc.MRCRequest; +import org.xtreemfs.mrc.brain.BrainRequestListener; +import org.xtreemfs.mrc.brain.BrainStage; +import org.xtreemfs.mrc.brain.UserException; +import org.xtreemfs.mrc.brain.storage.DiskLogger; +import org.xtreemfs.mrc.brain.storage.InvalidLogEntryException; +import org.xtreemfs.mrc.brain.storage.LogEntry; +import org.xtreemfs.mrc.brain.storage.SliceID; +import org.xtreemfs.mrc.brain.storage.SyncListener; +import org.xtreemfs.mrc.slices.SliceInfo; +import 
org.xtreemfs.mrc.slices.SliceManager; +import org.xtreemfs.mrc.slices.VolumeInfo; +import org.xtreemfs.mrc.utils.MessageUtils; + +/** + * Handles all replication specific commands. All persistent commands are sent + * to replicated MRCs according to the replication mechanism. + * + * @author bjko + */ +public class ReplicationManager extends LifeCycleThread { + + public static final int EC_SLICE_DOES_NOT_EXIST = 1000; + + public static final int EC_SLICE_NOT_OPERATIONAL = 1001; + + public static final int EC_LOGRANGE_NOT_AVAIL = 1002; + + public static final int EC_INVALID_VIEWID = 1003; + + /** + * Database dir from configuration used to store the settings + */ + private final String dbDir; + + /** filename in which the settings are stored */ + private static final String FILENAME = "replication.dat"; + + /** for crash recovery first a the tmp is written */ + private static final String TEMP_FILENAME = "replication.tmp"; + + /** request queue */ + private LinkedBlockingQueue requests; + + /** + * listener which is notfified after a replication request is finished + */ + private ReplicationRequestListener rrListener; + + /** + * listener which is notified after a replication system command is finished + */ + private BrainRequestListener brListener; + + /** + * if set to true the thread leaves the main loop + */ + private volatile boolean quit; + + /** + * MRCClient used to communicate with other MRCs + */ + private MRCClient mrcClient; + + /** + * from config + */ + private boolean debug; + + /** + * Brain stage used to perform retrieve slice details and to execute remote + * commands + */ + private BrainStage storage; + + /** Global settings */ + private MRCConfig config; + + /** + * Workaround to intercept Sync listener events + */ + private StupidListener sl; + + private final DiskLogger diskLogger; + + private final SliceManager slices; + + /** Creates a new instance of ReplicationManager */ + public ReplicationManager(MRCConfig config, MRCClient mrcClient, 
DiskLogger diskLogger, + SliceManager slices) throws IOException, ClassNotFoundException { + + super("ReplMgr thr."); + + this.diskLogger = diskLogger; + + this.slices = slices; + requests = new LinkedBlockingQueue(); + quit = false; + this.mrcClient = mrcClient; + this.config = config; + sl = new StupidListener(null); + + if (!config.getDbDir().endsWith("/")) { + dbDir = config.getDbDir() + "/"; + } else { + dbDir = config.getDbDir(); + } + + } + + /** + * Sets the BrainStage the replication manager sould use. + */ + public void setBrainStage(BrainStage storage) { + this.storage = storage; + } + + /** + * Registers a listener for BrainRequets (i.e. system commands). For + * replication commands use ReplicationListener + */ + public void registerBrainListener(BrainRequestListener brl) { + this.brListener = brl; + } + + /** + * Register a listener for Replication Requests. For a system command use + * BrainListener + */ + public void registerReplicationListener(ReplicationRequestListener rrl) { + this.rrListener = rrl; + } + + /** + * add a request to the job queue + */ + public void addRequest(MRCRequest rq) { + this.requests.add(rq); + } + + /** thread main loop */ + public void run() { + + notifyStarted(); + while (!quit) { + try { + // take next request from Q + MRCRequest rq = this.requests.take(); + // this assert is somehow superflous as it checks something + // that is specified in the javadocs... + assert (rq != null); + + if (rq.logEntry != null) { + // If there is a logEntry attached to the request + // it must be a persistent command already executed + // so we have to repliate it to somewhere... + doReplication(rq); + } else { + // a replication specific system command was requested + // by a remote server + if (rq.getPinkyRequest().requestURI.equals(".Replicate")) { + // another srver wants to replicate sth. 
to us + this.remoteOperation(rq); + // this operation takes care of notification itself + } else if (rq.getPinkyRequest().requestURI.equals(".RgetLogEntries")) { + this.getLogEntries(rq); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + } else if (rq.getPinkyRequest().requestURI.equals(".RgetSliceDB")) { + this.getSliceDB(rq); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + } else if (rq.getPinkyRequest().requestURI.equals(".Rinfo")) { + this.getInfos(rq); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + } else if (rq.getPinkyRequest().requestURI.equals(".RchangeStatus")) { + this.changeStatus(rq); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + } else if (rq.getPinkyRequest().requestURI.equals(".RforceUpdate")) { + this.forceUpdate(rq); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + } else if (rq.getPinkyRequest().requestURI.equals(".RnewSlaveSlice")) { + this.newSlaveSlice(rq); + // routine takes care of notification + } else if (rq.getPinkyRequest().requestURI.equals(".RnewMasterSlice")) { + this.newMasterSlice(rq); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + } else if (rq.getPinkyRequest().requestURI.equals(".RnoReplication")) { + this.nullReplication(rq); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + } else { + // HUH? 
what is that + rq.getPinkyRequest().setResponse(HTTPUtils.SC_NOT_IMPLEMENTED); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + } + } + + } catch (InterruptedException ex) { + // who cares + } + + } + + notifyStopped(); + } + + /** + * enqueues the operation contained in rq for replication + * + * @returns true if the client can get a response immediateley, if false the + * response is sent after successful replication + * @throws IllegalArgumentException + * if there is no LogEntry attached to the request + */ + public boolean replicate(MRCRequest rq) { + // need a valid log entry to send to remote site + if (rq.logEntry == null) + throw new IllegalArgumentException("replication requests must contain a valid LogEntry"); + + // first we have to check the replication policy for that slice + SliceInfo info = slices.getSliceInfo(rq.logEntry.slID); + assert (info != null); + + ReplicationMechanism rm = info.getReplicationMechanism(); + assert (rm != null); + + // if it is null replication we have nothing todo + if (rm instanceof NullReplicationMechanism) { + // release log entry immediateley + BufferPool.free(rq.logEntry.payload); + return true; + } + + // add the request to the Q, run will know this is a + // replicate request since a logEntry is attached + requests.add(rq); + + // bla + if (rm.sendResponseAfterReplication()) { + return false; + } else { + return true; + } + } + + /** + * execute a replication request after it was enqueued w/ replicate + */ + protected void doReplication(MRCRequest rq) { + // can't be null replication + SliceInfo info = slices.getSliceInfo(rq.logEntry.slID); + assert (info != null); + + ReplicationMechanism rm = info.getReplicationMechanism(); + assert (rm != null); + + if (rm instanceof MasterReplicationMechanism) { + // put the log entry into a ByteBuffer + ReusableBuffer l = null; + + try { + + l = rq.logEntry.marshall(); + assert (rq.logEntry.slID != null); + + List invalidSlaves = null; + + // and 
notify all registered slaves + for (InetSocketAddress slave : ((MasterReplicationMechanism) rm).slaves) { + if (mrcClient.serverIsAvailable(slave)) { + RPCResponse resp = null; + try { + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "requested replication to " + slave + " size(Q)=" + + this.requests.size()); + resp = mrcClient.sendRPC(slave, ".Replicate", l.createViewBuffer(), + null, null); + resp.waitForResponse(); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "replicated to " + slave); + } catch (JSONException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + // FIXME: this should never happen, but there should + // be some + // kind of notfication if it occurs + } catch (HttpErrorException ex) { + + SpeedyRequest sr = resp.getSpeedyRequest(); + String body = new String(sr.responseBody.array(), HTTPUtils.ENC_UTF8); + Map exc = null; + try { + exc = (Map) JSONParser.parseJSON(new JSONString( + body)); + } catch (JSONException e1) { + Logging.logMessage(Logging.LEVEL_ERROR, this, e1); + } + + if ((Long) exc.get("errno") == EC_SLICE_DOES_NOT_EXIST) { + // this is a big problem!!! 
+ Logging.logMessage(Logging.LEVEL_ERROR, this, "slave " + slave + + " does not have a slice " + rq.logEntry.slID + + " and is removed from the slave list!"); + if (invalidSlaves == null) + invalidSlaves = new ArrayList(); + invalidSlaves.add(slave); + } + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "cannot replicate to " + + slave + " because " + ex); + } catch (InterruptedException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } finally { + if (resp != null) + resp.freeBuffers(); + } + } else { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "speedy says that " + slave + + " is not available"); + } + } + if (invalidSlaves != null) { + int i = 0; + InetSocketAddress[] newSl = new InetSocketAddress[((MasterReplicationMechanism) rm).slaves.length + - invalidSlaves.size()]; + for (InetSocketAddress slave : ((MasterReplicationMechanism) rm).slaves) { + if (!invalidSlaves.contains(slave)) + newSl[i++] = slave; + } + ((MasterReplicationMechanism) rm).slaves = newSl; + try { + slices.modifySlice(info); + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } + } + + } finally { + if (l != null) + BufferPool.free(l); + } + } + + } + + /** + * Handles remote requests for operation execution, i.e. 
by a master MRC + */ + protected void remoteOperation(MRCRequest rq) { + try { + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "remote operation started, size(Q)=" + + this.requests); + rq.getPinkyRequest().requestBody.position(0); + // unmarshall the log entry + LogEntry e = new LogEntry(rq.getPinkyRequest().requestBody); + assert (e.slID != null); + + // get replication info for the affected slice + SliceInfo info = slices.getSliceInfo(e.slID); + if (info == null) { + // slice does not exist on this volume + MessageUtils.marshallException(rq, new UserException(EC_SLICE_DOES_NOT_EXIST, + "slice does not exist")); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + } + + synchronized (info) { + ReplicationMechanism rm = info.getReplicationMechanism(); + assert (rm != null); + // FIXME: handle error case when slice is not available anymore + // or the server sent an invalid request + + // execute only if the slice is ready + if (info.isReplicating()) { + + // check view and sequence ID.... + if ((e.viewID != info.getCurrentViewID()) + || (e.sequenceID > info.getCurrentSequenceID())) { + // we have missed some ops... + Logging.logMessage(Logging.LEVEL_WARN, this, "missing OPS..."); + // the slice is not operational anymore + // until it has fetched all missed log entries from the + // master + info.setReplicating(false); + // we first acknowledge to make sure that the server + // does not + // time out + MessageUtils.marshallException(rq, new UserException( + EC_SLICE_NOT_OPERATIONAL, "slice not up to date")); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + + // we fetch everything from the master... 
+ fetchMasterStatus(e.slID); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "remote operation done"); + return; + } + // if everything is fine we can just execute the log enry + this.replicateLogEntry(e, info); + e.attachment = rq; + rq.details.sliceId = e.slID; + // and append it to our local log file + e.listener = sl; + diskLogger.append(e); + rq.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "remote operation done"); + } else { + // slice is out of order...please try again ;-) + MessageUtils.marshallException(rq, new UserException(EC_SLICE_NOT_OPERATIONAL, + "slice not operational")); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "SLICE IS NOT OPERATIONAL"); + } + } + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + if (!rq.getPinkyRequest().responseSet) + MessageUtils.marshallException(rq, ex); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + } + } + + /** + * Remote server requested log entries for a slice + */ + public void getLogEntries(MRCRequest rq) { + // parse the request + try { + Object o = MessageUtils.unmarshallRequest(rq); + List args = (List) o; + // args are ["sliceID",currentViewID,startSequenceID,endSequenceID + String slID = (String) args.get(0); + int viewID = ((Long) args.get(1)).intValue(); + int startSQ = ((Long) args.get(2)).intValue(); + int endSQ = ((Long) args.get(3)).intValue(); + + // get a SliceID obj + SliceID sl = new SliceID(slID); + + // we need the replication info again + // get replication info for the affected slice + SliceInfo info = slices.getSliceInfo(sl); + assert (info != null); + + // tell the connection remover that we are still doing sth + rq.getPinkyRequest().active(); + if (info.getCurrentViewID() != viewID) { + if (info.getCurrentViewID() > viewID) { 
+ // okay, slave lags behind + MessageUtils.marshallException(rq, new UserException(EC_LOGRANGE_NOT_AVAIL, + "requested range is not available because view has already changed")); + } else { + // the other server is in a newer view + // THIS IS EVIL! + MessageUtils.marshallException(rq, new UserException(EC_INVALID_VIEWID, + "viewID is not valid")); + } + } else { + if (info.getLastAvailSqID() > startSQ) { + // there was a compactDB op after startSQ + // so the log entries are not available + // and the other server probably needs to fetch + // the entire database + MessageUtils.marshallException(rq, new UserException(EC_LOGRANGE_NOT_AVAIL, + "requested range is not available")); + } else { + ReusableBuffer b = diskLogger.getLog(sl, startSQ, endSQ); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "response is " + b); + // send the log file... + rq.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY, b, + HTTPUtils.DATA_TYPE.BINARY); + } + } + Logging.logMessage(Logging.LEVEL_DEBUG, this, "Log requested for " + slID + " " + + viewID + "/" + startSQ + "-" + endSQ); + } catch (Exception e) { + // FIXME: better exception handling like in brain + Logging.logMessage(Logging.LEVEL_WARN, this, e); + rq.getPinkyRequest().setResponse(HTTPUtils.SC_BAD_REQUEST); + } + } + + /** + * shut down the Replication Manager thread + */ + public void shutdown() { + this.quit = true; + this.interrupt(); + } + + /** + * Sends the entire slice DB + */ + private void getSliceDB(MRCRequest rq) { + try { + assert (rq != null); + assert (rq.getPinkyRequest() != null); + + Object o = MessageUtils.unmarshallRequest(rq); + List args = (List) o; + String slID = (String) args.get(0); + + // remove everything that is not a character to make sure nobody + // messes around w/ our FS + slID = slID.replaceAll("/[^abcdefABCDEF0123456789]/", ""); + + File sliceFile = new File(dbDir + slID + "/mrcdb." 
+ + VersionManagement.getMrcDataVersion()); + if (sliceFile.exists()) { + // we are working, connremover should not GC us + rq.getPinkyRequest().active(); + // files should not get larger, if they do, we are f*up + assert (sliceFile.length() <= Integer.MAX_VALUE); + int size = (int) sliceFile.length(); + FileInputStream fis = new FileInputStream(sliceFile); + FileChannel fc = fis.getChannel(); + // map DB into memory + MappedByteBuffer mbb = fc.map(FileChannel.MapMode.READ_ONLY, 0, size); + ReusableBuffer buf = new ReusableBuffer(mbb); + fis.close(); + SliceID tmp = new SliceID(slID); + SliceInfo info = slices.getSliceInfo(tmp); + assert (info != null); + // add headers for DB info (like which view we have and until + // which sequence ID it has + // compacted all ops + HTTPHeaders addHdrs = new HTTPHeaders(); + addHdrs.addHeader("X-ViewID", info.getCurrentViewID()); + addHdrs.addHeader("X-Start-SequenceID", info.getLastAvailSqID()); + rq.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY, buf, + HTTPUtils.DATA_TYPE.BINARY, addHdrs); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "Database rqquested for " + slID); + } else { + // FIXME: Huston, we have a problem... 
+ Logging.logMessage(Logging.LEVEL_ERROR, this, "request for " + slID + " but " + + sliceFile.getAbsolutePath() + " not found??"); + MessageUtils.marshallException(rq, new UserException(EC_SLICE_DOES_NOT_EXIST, + "slice does not exist")); + } + } catch (Exception e) { + MessageUtils.marshallException(rq, e); + } + + } + + /** + * executed when a remote master wants to start replicating a slice to this + * MRC + */ + private void newSlaveSlice(MRCRequest rq) { + try { + assert (rq != null); + assert (rq.getPinkyRequest() != null); + + Object o = MessageUtils.unmarshallRequest(rq); + List args = (List) o; + String slID = (String) args.get(0); + String master = (String) args.get(1); + String volName = (String) args.get(2); + Long fileACL = (Long) args.get(3); + Long osdPolicy = (Long) args.get(4); + String osdPolicyArgs = (String) args.get(5); + Long partPolicy = (Long) args.get(6); + + // make sure there are no special chars in the sliceID (security) + slID = slID.replaceAll("/[^a-fA-F0-9]/", ""); + InetSocketAddress masterAddr = MessageUtils.addrFromString(master); + + // lets see if we already have that slice + SliceID sliceID = new SliceID(slID); + SliceInfo info = slices.getSliceInfo(sliceID); + + if (info != null) { + // just change the info + info.setStatus(SliceInfo.SliceStatus.OFFLINE); + info.setReplicating(true); + info.changeReplicationMechanism(new SlaveReplicationMechanism(masterAddr)); + slices.modifySlice(info); + } else { + + // first, create a local representation of the volume if + // none exists yet + if (!slices.hasVolumeWithId(sliceID.getVolumeId())) + slices.createVolume(sliceID.getVolumeId(), volName, fileACL, osdPolicy, + osdPolicyArgs, partPolicy, false, false); + + // create the slice + + // create the directory.. 
+ File dir = new File(dbDir + slID); + dir.mkdirs(); + + info = new SliceInfo(sliceID, new SlaveReplicationMechanism(masterAddr)); + info.setReplicating(true); + info.setStatus(SliceInfo.SliceStatus.OFFLINE); + slices.createSlice(info, true); + + // SYNC! + rq.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + + // do the rest "offline" + } + // fetch the current database + fetchMasterStatus(sliceID); + rq.getPinkyRequest().active(); + + } catch (Exception e) { + e.printStackTrace(); + MessageUtils.marshallException(rq, e); + rq.details.persistentOperation = false; + brListener.brainRequestDone(rq); + } + } + + /** + * executed when a remote master wants to start replicating a slice to this + * MRC + */ + private void nullReplication(MRCRequest rq) { + try { + assert (rq != null); + assert (rq.getPinkyRequest() != null); + + Object o = MessageUtils.unmarshallRequest(rq); + List args = (List) o; + String slID = (String) args.get(0); + + // make sure there are no special chars in the sliceID (security) + slID = slID.replaceAll("/[^a-fA-F0-9]/", ""); + + // lets see if we already have that slice + SliceID sliceID = new SliceID(slID); + SliceInfo info = slices.getSliceInfo(sliceID); + + if (info != null) { + // just change the info + info.setStatus(SliceInfo.SliceStatus.OFFLINE); + info.setReplicating(false); + info.changeReplicationMechanism(new NullReplicationMechanism()); + slices.modifySlice(info); + rq.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY); + } else { + MessageUtils.marshallException(rq, new UserException(EC_SLICE_DOES_NOT_EXIST, + "slice does not exist!")); + } + // fetch the current database + rq.getPinkyRequest().active(); + + } catch (Exception e) { + MessageUtils.marshallException(rq, e); + } + } + + /** + * fetch missed log entries and/or full slice database from the master MRC + */ + private void fetchMasterStatus(SliceID sl) throws IOException { + // get local state + 
SliceInfo info = slices.getSliceInfo(sl); + assert (info != null); + synchronized (info) { + + ReplicationMechanism rm = info.getReplicationMechanism(); + assert (rm != null); + + info.setReplicating(false); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "fetch master status started"); + + boolean retry = false; + do { + RPCResponse mr = null; + + try { + // ask master to send me the missing log part + + // first build a request + List args = new LinkedList(); + args.add(sl.toString()); + args.add((Integer) info.getCurrentViewID()); + args.add((Integer) info.getCurrentSequenceID()); + args.add((Integer) Integer.MAX_VALUE); + // and send it to the master + mr = mrcClient.sendRPC(((SlaveReplicationMechanism) rm).master, + ".RgetLogEntries", args, null, null); + // wait for answer + mr.waitForResponse(); + + SpeedyRequest rq = mr.getSpeedyRequest(); + + // we could contact the master MRC (no exception) + retry = false; + if (rq.responseBody != null) { + // read the response body + // create view buffer + ReusableBuffer body = rq.responseBody.createViewBuffer(); + body.position(0); + + // we need this "null listener" for the DiskLogger + StupidListener nada = new StupidListener(info); + + try { + // do sth with this log stuff... + while (true) { + LogEntry e = new LogEntry(body); + // we do not really care about the result, at + // least + // at here + e.registerListener(nada); + replicateLogEntry(e, info); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "replayed remote op: " + e.operationName + " params: " + + e.payload); + + diskLogger.append(e); + } + } catch (InvalidLogEntryException ex) { + if (ex.getLength() != 0) { + info.setStatus(SliceInfo.SliceStatus.OFFLINE); + info.setReplicating(true); + throw new IOException("cannot read log entries " + ex, ex); + } + // probably a half finished log entry... 
+ } catch (Exception ex) { + info.setStatus(SliceInfo.SliceStatus.OFFLINE); + info.setReplicating(true); + throw new IOException("cannot read log entries " + ex, ex); + } finally { + BufferPool.free(body); + } + } else { + // nothing to replay...remote log is empty! + } + + } catch (InterruptedException e) { + throw new IOException(e.getMessage()); + } catch (JSONException e) { + throw new IOException(e.getMessage()); + } catch (HttpErrorException e) { + // okay sth went wrong + if (retry) { + info.setReplicating(true); + info.setStatus(SliceInfo.SliceStatus.OFFLINE); + throw new IOException("cannot fetch database for slice " + sl + " from " + + ((SlaveReplicationMechanism) rm).master); + } + + assert (mr != null); + + SpeedyRequest rq = mr.getSpeedyRequest(); + String body = new String(rq.responseBody.array(), HTTPUtils.ENC_UTF8); + Map exc; + try { + exc = (Map) JSONParser.parseJSON(new JSONString(body)); + } catch (JSONException e1) { + throw new IOException(e1); + } + + if ((Long) exc.get("errno") == EC_LOGRANGE_NOT_AVAIL) { + // this means we have to get the full database first + retry = true; + + RPCResponse mr2 = null; + try { + // make a request object + List args2 = new LinkedList(); + args2.add(sl.toString()); + // and send request + mr2 = mrcClient.sendRPC(((SlaveReplicationMechanism) rm).master, + ".RgetSliceDB", args2, null, null); + mr2.waitForResponse(); + SpeedyRequest sr = mr2.getSpeedyRequest(); + + // great! we have it + FileOutputStream fos = new FileOutputStream(dbDir + sl + "/mrcdb." 
+ + VersionManagement.getMrcDataVersion()); + sr.responseBody.position(0); + fos.getChannel().write(sr.responseBody.getBuffer()); + fos.close(); + + // fetch current view and lastAvailSqID from headers + String tmp = sr.responseHeaders.getHeader("X-ViewID"); + if (tmp != null) { + info.setCurrentViewID(Integer.valueOf(tmp)); + } else { + info.setReplicating(true); + throw new IOException("missing or invalid X-ViewID header"); + } + + tmp = sr.responseHeaders.getHeader("X-Start-SequenceID"); + if (tmp != null) { + info.setLastAvailSqID(Integer.valueOf(tmp) - 1); + info.setNextSequenceID(Integer.valueOf(tmp)); + } else { + info.setReplicating(true); + throw new IOException( + "missing or invalid X-Start-SequenceID header"); + } + // tell the storage manager that it has to read from + // disk + slices.reInitSlice(sl); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "fetch DB done and relinked..."); + } catch (Exception e2) { + // this is the end my only friend, the end ;-) + Logging.logMessage(Logging.LEVEL_ERROR, this, e2); + info.setReplicating(true); + info.setStatus(SliceInfo.SliceStatus.OFFLINE); + throw new IOException(e2.getMessage()); + } finally { + if (mr2 != null) + mr2.freeBuffers(); + } + + } else { + Logging.logMessage(Logging.LEVEL_WARN, this, "unknwon error code :" + e); + } + } catch (IOException e) { + info.setReplicating(true); + info.setStatus(SliceInfo.SliceStatus.OFFLINE); + Logging.logMessage(Logging.LEVEL_WARN, this, "cannot contact master for slice " + + sl); + throw e; + } finally { + if (mr != null) + mr.freeBuffers(); + } + } while (retry); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "fetch master status done"); + info.setReplicating(true); + info.setStatus(SliceInfo.SliceStatus.READONLY); + } + } + + /** + * execute a remote operation locally + */ + private void replicateLogEntry(LogEntry l, SliceInfo ri) throws Exception { + // parse the log entry contents + Object args = null; + if (l.payload != null) { + // parse 
JSONrequest.pr.requestBody.position(0); + CharBuffer utf8buf = HTTPUtils.ENC_UTF8.decode(l.payload.getBuffer()); + args = JSONParser.parseJSON(new JSONCharBufferString(utf8buf)); + } + + // this operation works in sync mode + storage.replayLogEntry(l.operationName, l.userID, l.groupID, args); + + ri.setNextSequenceID(l.sequenceID + 1); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "replayed remote operation " + + l.operationName + " with " + args); + } + + /** + * setup this MRC as the master for an existing slice + */ + private void newMasterSlice(MRCRequest rq) { + try { + assert (rq != null); + assert (rq.getPinkyRequest() != null); + + Object o = MessageUtils.unmarshallRequest(rq); + // unwrap the arguments + List args = (List) o; + String slID = (String) args.get(0); + List slaves = (List) args.get(1); + Boolean waitForAck = (Boolean) args.get(2); + + // convert the strings to socket addresses + InetSocketAddress[] addrs = new InetSocketAddress[slaves.size()]; + int i = 0; + for (Object sl : slaves) { + String master = (String) sl; + addrs[i++] = MessageUtils.addrFromString(master); + } + // no special chars in the slice ID! 
+ slID = slID.replaceAll("/[^a-fA-F0-9]/", ""); + + SliceID sliceID = new SliceID(slID); + // get replication info for the affected slice + SliceInfo info = slices.getSliceInfo(sliceID); + assert (info != null); + + if (info == null) { + MessageUtils.marshallException(rq, new UserException(EC_SLICE_DOES_NOT_EXIST, + "no such slice")); + return; + } + + // create and save new replication info + ReplicationMechanism rm = new MasterReplicationMechanism(addrs, waitForAck); + info.changeReplicationMechanism(rm); + slices.modifySlice(info); + + // before creating new slaves we have to compact our database + // to keep transfers to a minimum and to make sure that there + // is a db file for new slices + try { + // compact db + slices.compactDB(); + diskLogger.cleanLog(); + slices.completeDBCompaction(); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + MessageUtils.marshallException(rq, ex); + return; + } + + // notify all slaves... + VolumeInfo ve = slices.getVolumeById(sliceID.getVolumeId()); + + // create request object containing all slice info + List args2 = new LinkedList(); + args2.add(slID); + args2.add(new ServiceUUID(config.getUUID().toString()).getAddress().getHostName() + ":" + + config.getPort()); + args2.add(ve.getName()); + args2.add(ve.getAcPolicyId()); + args2.add(ve.getOsdPolicyId()); + args2.add(ve.getOsdPolicyArgs()); + args2.add(ve.getPartitioningPolicyId()); + + // send requests... 
+ for (InetSocketAddress slave : addrs) { + mrcClient.sendRPC(slave, ".RnewSlaveSlice", args2, null, null).waitForResponse(); + } + + // ack to client + rq.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY); + } catch (Exception e) { + Logging.logMessage(Logging.LEVEL_ERROR, this, e); + MessageUtils.marshallException(rq, e); + } + } + + /** + * should be called on start up to ensure that all slave slices have the + * most recent data + */ + public void init() { + // check all slave volumes + for (SliceID sl : slices.getSliceList()) { + SliceInfo info = slices.getSliceInfo(sl); + ReplicationMechanism rm = info.getReplicationMechanism(); + if (rm instanceof SlaveReplicationMechanism) { + try { + fetchMasterStatus(sl); + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } + } else if (rm instanceof MasterReplicationMechanism) { + info.setReplicating(true); + info.setStatus(SliceInfo.SliceStatus.ONLINE); + } else { + info.setStatus(SliceInfo.SliceStatus.ONLINE); + } + } + } + + /** + * return details of slices and replication info + */ + private void getInfos(MRCRequest rq) { + List infos = new LinkedList(); + for (SliceID slID : slices.getSliceList()) { + Map slInfo = new HashMap(); + slInfo.put("sliceID", slID.toString()); + SliceInfo info = slices.getSliceInfo(slID); + slInfo.put("replicationMechanism", info.getReplicationMechanism().getClass() + .getSimpleName()); + slInfo.put("viewID", info.getCurrentViewID()); + slInfo.put("sequenceID", info.getCurrentSequenceID() - 1); + slInfo.put("replicationActive", info.isReplicating()); + slInfo.put("status", info.getStatus().toString()); + VolumeInfo ve; + try { + ve = slices.getVolumeById(slID.getVolumeId()); + slInfo.put("volumeName", ve.getName()); + } catch (Exception ex) { + slInfo.put("volumeName", ""); + } + + if (info.getReplicationMechanism() instanceof SlaveReplicationMechanism) { + slInfo.put("master", + ((SlaveReplicationMechanism) 
info.getReplicationMechanism()).master.toString()); + } + if (info.getReplicationMechanism() instanceof MasterReplicationMechanism) { + List sl = new LinkedList(); + for (InetSocketAddress isa : ((MasterReplicationMechanism) info + .getReplicationMechanism()).slaves) { + sl.add(isa.toString()); + } + slInfo.put("slaves", sl); + } + infos.add(slInfo); + } + MessageUtils.marshallResponse(rq, infos); + } + + protected void changeStatus(MRCRequest rq) { + try { + assert (rq != null); + assert (rq.getPinkyRequest() != null); + + Object o = MessageUtils.unmarshallRequest(rq); + List args = (List) o; + String slID = (String) args.get(0); + String newStatus = (String) args.get(1); + + // try to get slice + SliceInfo info = slices.getSliceInfo(new SliceID(slID)); + if (info == null) + throw new Exception("No such slice on this server"); + + if (newStatus.equals("ONLINE")) { + info.setStatus(SliceInfo.SliceStatus.ONLINE); + } else if (newStatus.equals("OFFLINE")) { + info.setStatus(SliceInfo.SliceStatus.OFFLINE); + } else if (newStatus.equals("READONLY")) { + info.setStatus(SliceInfo.SliceStatus.READONLY); + } else { + throw new Exception("Invalid status requested"); + } + slices.modifySlice(info); + + rq.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY); + rq.details.persistentOperation = false; + + } catch (Exception e) { + MessageUtils.marshallException(rq, e); + rq.details.persistentOperation = false; + } + } + + protected void forceUpdate(MRCRequest rq) { + try { + assert (rq != null); + assert (rq.getPinkyRequest() != null); + + Object o = MessageUtils.unmarshallRequest(rq); + List args = (List) o; + String slID = (String) args.get(0); + + // try to get slice + SliceInfo info = slices.getSliceInfo(new SliceID(slID)); + if (info == null) + throw new UserException(EC_SLICE_DOES_NOT_EXIST, "No such slice on this server"); + + if (info.getReplicationMechanism() instanceof SlaveReplicationMechanism) { + fetchMasterStatus(info.sliceID); + } else { + throw new 
UserException("Only slave slices can be updated"); + } + + rq.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY); + rq.details.persistentOperation = false; + + } catch (Exception e) { + MessageUtils.marshallException(rq, e); + rq.details.persistentOperation = false; + } + } + + public int getQLength() { + return this.requests.size(); + } + + /** + * a listener for syncing remote operations. It does not do anything but + * prints a message in the case of an error. + */ + public static class StupidListener implements SyncListener { + + public SliceInfo info; + + public StupidListener(SliceInfo info) { + this.info = info; + } + + public void synced(LogEntry entry) { + // who cares + } + + public void failed(LogEntry entry, Exception ex) { + // FIXME: should do something but don't know what... + Logging.logMessage(Logging.LEVEL_ERROR, this, "SYNC TO DISK FAILED"); + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } + + } + +} diff --git a/servers/src/org/xtreemfs/mrc/replication/ReplicationMechanism.java b/servers/src/org/xtreemfs/mrc/replication/ReplicationMechanism.java new file mode 100644 index 0000000000000000000000000000000000000000..0110aaf8da67079dab7d52bb123d36b52539ccc9 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/replication/ReplicationMechanism.java @@ -0,0 +1,42 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
 XtreemFS is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
 along with XtreemFS. If not, see .
*/
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
 */

package org.xtreemfs.mrc.replication;

import java.io.Serializable;
import org.xtreemfs.mrc.slices.SliceInfo;

/**
 * Strategy interface for per-slice replication behavior. Implementations are
 * stored as part of the slice metadata (hence Serializable) and decide how -
 * and whether - log entries for the slice are propagated to other MRCs.
 *
 * @author bjko
 */
public interface ReplicationMechanism extends Serializable {

    /**
     * Returns true if the client response must be deferred until replication
     * has completed; false if the client may be answered immediately.
     */
    public boolean sendResponseAfterReplication();

    /** Associates this mechanism with the slice it governs. */
    public void registerSlice(SliceInfo slice);

    /** Returns the slice previously set via registerSlice (may be null before registration). */
    public SliceInfo getSlice();

}
diff --git a/servers/src/org/xtreemfs/mrc/replication/ReplicationRequestListener.java b/servers/src/org/xtreemfs/mrc/replication/ReplicationRequestListener.java
new file mode 100644
index 0000000000000000000000000000000000000000..8661058e70074a0e3ebefd9ff00c8a9922bdde8a
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/replication/ReplicationRequestListener.java
@@ -0,0 +1,37 @@
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.

 This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
 Grid Operating System, see for more details.
 The XtreemOS project has been developed with the financial support of the
 European Commission's IST program under contract #FP6-033576.

 XtreemFS is free software: you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation, either version 2 of the License, or (at your option)
 any later version.

 XtreemFS is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.
 You should have received a copy of the GNU General Public License
 along with XtreemFS. If not, see .
*/
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
 */

package org.xtreemfs.mrc.replication;

import org.xtreemfs.mrc.MRCRequest;

/**
 * Callback interface used to signal that the replication of a request has
 * finished, so that a deferred client response can be sent.
 *
 * @author bjko
 */
public interface ReplicationRequestListener {

    /** Invoked when replication for the given request has completed. */
    public void replicationDone(MRCRequest rq);

}
diff --git a/servers/src/org/xtreemfs/mrc/replication/SlaveReplicationMechanism.java b/servers/src/org/xtreemfs/mrc/replication/SlaveReplicationMechanism.java
new file mode 100644
index 0000000000000000000000000000000000000000..db0afc8cf6f080d586c423783c38ac0b02470f82
--- /dev/null
+++ b/servers/src/org/xtreemfs/mrc/replication/SlaveReplicationMechanism.java
@@ -0,0 +1,70 @@
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.

 This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
 Grid Operating System, see for more details.
 The XtreemOS project has been developed with the financial support of the
 European Commission's IST program under contract #FP6-033576.

 XtreemFS is free software: you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation, either version 2 of the License, or (at your option)
 any later version.

 XtreemFS is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
 along with XtreemFS. If not, see .
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
 * This file is part of XtreemFS, licensed under the GNU General Public
 * License, version 2 or (at your option) any later version. XtreemFS is
 * distributed WITHOUT ANY WARRANTY; see <http://www.gnu.org/licenses/>.
 */
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
 */

package org.xtreemfs.mrc.replication;

import java.io.IOException;
import java.io.ObjectInputStream;
import java.net.InetSocketAddress;
import java.util.LinkedList;
import java.util.List;

import org.xtreemfs.mrc.MRCRequest;
import org.xtreemfs.mrc.brain.storage.LogEntry;
import org.xtreemfs.mrc.brain.storage.SliceID;
import org.xtreemfs.mrc.slices.SliceInfo;

/**
 * Replication mechanism for a slice acting as a replication <em>slave</em>:
 * it receives updates from a master MRC and therefore never defers client
 * responses itself ({@link #sendResponseAfterReplication()} returns
 * {@code false}).
 *
 * @author bjko
 */
public class SlaveReplicationMechanism implements ReplicationMechanism {

    /** address of the master MRC this slave replicates from */
    public InetSocketAddress master;

    /** the slice this mechanism is attached to */
    private SliceInfo slice;

    /**
     * requests queued while replication is in progress; transient because
     * pending requests are meaningless after deserialization
     */
    transient private List<MRCRequest> pendingRequests;

    /**
     * Creates a new instance of SlaveReplicationMechanism.
     *
     * @param master address of the master MRC to replicate from
     */
    public SlaveReplicationMechanism(InetSocketAddress master) {
        this.master = master;
        this.pendingRequests = new LinkedList<MRCRequest>();
    }

    /** Slaves respond immediately; replication is driven by the master. */
    public boolean sendResponseAfterReplication() {
        return false;
    }

    /**
     * Attaches this mechanism to the given slice and marks the slice as
     * replicating.
     */
    public void registerSlice(SliceInfo slice) {
        this.slice = slice;
        this.slice.setReplicating(true);
    }

    public SliceInfo getSlice() {
        return this.slice;
    }

    /**
     * Re-initializes transient state after deserialization. Without this,
     * {@code pendingRequests} would be {@code null} on a deserialized
     * instance and cause a NullPointerException on first use.
     */
    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        pendingRequests = new LinkedList<MRCRequest>();
    }

}
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
 * This file is part of XtreemFS, licensed under the GNU General Public
 * License, version 2 or (at your option) any later version. XtreemFS is
 * distributed WITHOUT ANY WARRANTY; see <http://www.gnu.org/licenses/>.
 */
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
 */

package org.xtreemfs.mrc.slices;

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import org.xtreemfs.mrc.brain.UserException;
import org.xtreemfs.mrc.brain.storage.BackendException;
import org.xtreemfs.mrc.brain.storage.SliceID;

/**
 * A simple partitioning policy that only maintains a single slice per volume.
 * Both {@code getSlice} variants therefore resolve to that one slice,
 * regardless of path or file ID.
 *
 * @author stender
 */
public class DefaultPartitioningPolicy implements PartitioningPolicy {

    /** ID under which this policy is registered with the PartitioningManager */
    public static final long POLICY_ID = 1;

    private final SliceManager sliceMan;

    public DefaultPartitioningPolicy(SliceManager sliceMan) {
        this.sliceMan = sliceMan;
    }

    /**
     * Creates the initial slice set for a new volume: exactly one slice with
     * the default (null) replication mechanism.
     */
    public Collection<SliceInfo> getInitialSlices(String volumeId)
        throws BackendException {

        try {
            Set<SliceInfo> slices = new HashSet<SliceInfo>();
            slices.add(new SliceInfo(new SliceID(volumeId, 1), null));
            return slices;
        } catch (Exception exc) {
            throw new BackendException(exc);
        }
    }

    public SliceID getSlice(String volumeId, String path)
        throws BackendException {
        // path is irrelevant; there is only one slice per volume
        return getSingleSlice(volumeId);
    }

    public SliceID getSlice(String volumeId, long fileId)
        throws BackendException {
        // fileId is irrelevant; there is only one slice per volume
        return getSingleSlice(volumeId);
    }

    /**
     * Returns the ID of the volume's only slice; shared by both
     * {@code getSlice} overloads.
     */
    private SliceID getSingleSlice(String volumeId) throws BackendException {
        try {
            return sliceMan.getVolumeById(volumeId).getSlices().iterator()
                    .next().sliceID;
        } catch (UserException exc) {
            throw new BackendException(exc);
        }
    }
}
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
 * This file is part of XtreemFS, licensed under the GNU General Public
 * License, version 2 or (at your option) any later version. XtreemFS is
 * distributed WITHOUT ANY WARRANTY; see <http://www.gnu.org/licenses/>.
 */
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
 */

package org.xtreemfs.mrc.slices;

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.xtreemfs.mrc.brain.UserException;
import org.xtreemfs.mrc.brain.storage.BackendException;
import org.xtreemfs.mrc.brain.storage.SliceID;

/**
 * Dispatches partitioning requests to the {@link PartitioningPolicy}
 * configured for each volume. Policies are registered by their numeric ID;
 * currently only {@link DefaultPartitioningPolicy} is available.
 */
public class PartitioningManager {

    /** slice manager used to resolve volumes and their policy IDs */
    private final SliceManager sliceMan;

    /** maps policy IDs to their (stateless) policy implementations */
    private final Map<Long, PartitioningPolicy> policies;

    public PartitioningManager(SliceManager sliceMan) {

        this.sliceMan = sliceMan;

        policies = new HashMap<Long, PartitioningPolicy>();
        policies.put(DefaultPartitioningPolicy.POLICY_ID,
            new DefaultPartitioningPolicy(sliceMan));
    }

    /** Delegates initial slice creation to the volume's policy. */
    public Collection<SliceInfo> getInitialSlices(String volumeId)
        throws BackendException {
        return getPolicy(volumeId).getInitialSlices(volumeId);
    }

    /** Resolves the slice responsible for the given path. */
    public SliceID getSlice(String volumeId, String path)
        throws BackendException {
        return getPolicy(volumeId).getSlice(volumeId, path);
    }

    /** Resolves the slice responsible for the given file ID. */
    public SliceID getSlice(String volumeId, long fileId)
        throws BackendException {
        return getPolicy(volumeId).getSlice(volumeId, fileId);
    }

    /**
     * Looks up the partitioning policy configured for the given volume.
     *
     * @throws BackendException if the volume is unknown or its configured
     *             policy ID has no registered implementation
     */
    protected PartitioningPolicy getPolicy(String volumeId)
        throws BackendException {

        try {
            long policyId = sliceMan.getVolumeById(volumeId)
                    .getPartitioningPolicyId();

            PartitioningPolicy policy = policies.get(policyId);
            if (policy == null)
                throw new BackendException(
                    "unknown partitioning policy for volume " + volumeId
                        + ": " + policyId);

            return policy;

        } catch (UserException exc) {
            throw new BackendException(exc);
        }
    }

}
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
 * This file is part of XtreemFS, licensed under the GNU General Public
 * License, version 2 or (at your option) any later version. XtreemFS is
 * distributed WITHOUT ANY WARRANTY; see <http://www.gnu.org/licenses/>.
 */
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
 */

package org.xtreemfs.mrc.slices;

import java.util.Collection;

import org.xtreemfs.mrc.brain.storage.BackendException;
import org.xtreemfs.mrc.brain.storage.SliceID;

/**
 * A metadata partitioning policy: decides how a volume's metadata is split
 * into slices, and which slice is responsible for a given path or file ID.
 */
public interface PartitioningPolicy {

    /**
     * Creates the set of slices a newly created volume starts with.
     *
     * @param volumeId the ID of the new volume
     * @return the initial slices of the volume
     */
    public Collection<SliceInfo> getInitialSlices(String volumeId)
        throws BackendException;

    /**
     * Resolves the slice responsible for a path within a volume.
     *
     * @param volumeId the volume ID
     * @param path the path to the resource within the volume
     */
    public SliceID getSlice(String volumeId, String path)
        throws BackendException;

    /**
     * Resolves the slice responsible for a file ID within a volume.
     *
     * @param volumeId the volume ID
     * @param fileId the file ID
     */
    public SliceID getSlice(String volumeId, long fileId)
        throws BackendException;

}
+ The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.slices; + +/** + * + * @author bjko + */ +public interface SliceEventListener { + + public static enum EventType { + SLICE_ADDED, + SLICE_MODIFIED, + SLICE_REMOVED + }; + + public void event(EventType type, SliceInfo slice); + +} diff --git a/servers/src/org/xtreemfs/mrc/slices/SliceInfo.java b/servers/src/org/xtreemfs/mrc/slices/SliceInfo.java new file mode 100644 index 0000000000000000000000000000000000000000..fbada66b309030fb0d721134989ec822276afdc1 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/slices/SliceInfo.java @@ -0,0 +1,157 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
 * This file is part of XtreemFS, licensed under the GNU General Public
 * License, version 2 or (at your option) any later version. XtreemFS is
 * distributed WITHOUT ANY WARRANTY; see <http://www.gnu.org/licenses/>.
 */
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
 */

package org.xtreemfs.mrc.slices;

import java.io.Serializable;
import java.util.concurrent.atomic.AtomicInteger;

import org.xtreemfs.mrc.brain.storage.SliceID;
import org.xtreemfs.mrc.replication.NullReplicationMechanism;
import org.xtreemfs.mrc.replication.ReplicationMechanism;

/**
 * Metadata describing a single slice: its view/sequence counters, its
 * replication mechanism and its availability status. Instances are
 * serialized as part of the slice/volume metadata, hence
 * {@link Serializable}.
 *
 * @author bjko
 */
public class SliceInfo implements Serializable {

    /** immutable identifier of the slice this object describes */
    public final SliceID sliceID;

    /** the current view ID; incremented on every view change */
    private volatile int currentViewID;

    /** sequence ID to assign to the next operation */
    private AtomicInteger nextSequenceID;

    /**
     * sequence ID of the first (oldest) log entry available
     */
    private int lastAvailSqID;

    /** replication mechanism to use for this slice; never null */
    private ReplicationMechanism replMech;

    /** slice status (as seen by the client) */
    public static enum SliceStatus {
        ONLINE, READONLY, OFFLINE
    };

    /** current status of the slice */
    private volatile SliceStatus status;

    /** true if replication is working and accepting remote operations */
    private volatile boolean replicationOperational;

    /**
     * set once the slice has been deleted; volatile for the same
     * cross-thread visibility reasons as the other flags
     */
    private volatile boolean deleted;

    /**
     * Creates a new instance of SliceInfo.
     *
     * @param id the slice's ID
     * @param mecha the replication mechanism; {@code null} selects a
     *            {@link NullReplicationMechanism}
     */
    public SliceInfo(SliceID id, ReplicationMechanism mecha) {
        this.sliceID = id;
        nextSequenceID = new AtomicInteger(0);
        currentViewID = 0;
        lastAvailSqID = 0;
        if (mecha == null) {
            replMech = new NullReplicationMechanism();
        } else {
            replMech = mecha;
        }
        status = SliceStatus.ONLINE;
        replicationOperational = false;
    }

    /** Atomically hands out the next sequence ID and advances the counter. */
    public int getNextSequenceID() {
        return this.nextSequenceID.getAndIncrement();
    }

    /** Returns the sequence ID that would be assigned next, without consuming it. */
    public int getCurrentSequenceID() {
        return this.nextSequenceID.get();
    }

    public int getLastAvailSqID() {
        return this.lastAvailSqID;
    }

    public int getCurrentViewID() {
        return currentViewID;
    }

    /** Starts a new view: increments the view ID and resets both sequence counters. */
    public synchronized void viewChange() {
        currentViewID++;
        lastAvailSqID = 0;
        nextSequenceID.set(0);
    }

    public ReplicationMechanism getReplicationMechanism() {
        return this.replMech;
    }

    /** Switches the replication mechanism; implies a view change. */
    public synchronized void changeReplicationMechanism(ReplicationMechanism rm) {
        currentViewID++;
        lastAvailSqID = 0;
        nextSequenceID.set(0);
        replMech = rm;
    }

    public SliceStatus getStatus() {
        return status;
    }

    public void setStatus(SliceStatus status) {
        this.status = status;
    }

    /**
     * @return whether replication is operational. NOTE: returns the boxed
     *         {@code Boolean} for compatibility with existing callers.
     */
    public Boolean isReplicating() {
        return this.replicationOperational;
    }

    public void setReplicating(boolean rStatus) {
        this.replicationOperational = rStatus;
    }

    public void setLastAvailSqID(int sqID) {
        this.lastAvailSqID = sqID;
    }

    public void setCurrentViewID(int viewID) {
        assert (viewID >= 0);

        this.currentViewID = viewID;
    }

    public void setNextSequenceID(int id) {
        this.nextSequenceID.set(id);
    }

    /** Marks the slice as deleted; the flag cannot be cleared again. */
    public void setDeleted() {
        this.deleted = true;
    }

    public boolean isDeleted() {
        return this.deleted;
    }

}
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.slices; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FileWriter; +import java.io.FilenameFilter; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.net.SocketException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import javax.xml.parsers.SAXParser; +import javax.xml.parsers.SAXParserFactory; + +import org.xml.sax.Attributes; +import org.xml.sax.SAXException; +import org.xml.sax.helpers.DefaultHandler; +import org.xtreemfs.common.VersionManagement; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.mrc.MRCConfig; +import org.xtreemfs.mrc.brain.ErrNo; +import org.xtreemfs.mrc.brain.UserException; +import org.xtreemfs.mrc.brain.VolumeChangeListener; +import org.xtreemfs.mrc.brain.storage.BackendException; +import org.xtreemfs.mrc.brain.storage.SliceID; +import org.xtreemfs.mrc.brain.storage.StorageManager; +import org.xtreemfs.mrc.brain.storage.StorageManager.RestoreState; + +/** + * Manages all locally known slices of all volumes. + * + *

+ * Each slice represents a certain part of all metadata stored in a volume. For + * each slice, a separate database is maintained, which can be accessed by its + * own StorageManager instance. If metadata partitioning is used, + * several slices may constitute a single volume. + * + * @author bjko, stender + */ +public class SliceManager { + + /** filename in which the settings are stored */ + private static final String FILENAME = "slices.dat"; + + /** for crash recovery first a the tmp is written */ + private static final String TEMP_FILENAME = "slices.tmp"; + + private static final String CP_LOCK_FILENAME = ".lock"; + + /** maps all locally known slice IDs to the corresponding slice info objects */ + private Map slicesById; + + /** + * maps the names of all locally known volumes to the corresponding volume + * info objects + */ + private Map volumesByName; + + /** + * maps the IDs of all locally known volumes to the corresponding volume + * info objects + */ + private Map volumesById; + + /** + * contains all listeners that are notified when slices are added, removed + * or changed + */ + private final List listeners; + + /** the MRC configuration */ + private final MRCConfig config; + + /** the database directory */ + private final String dbDir; + + /** + * maps the IDs of all locally known slices to their storage managers + */ + private final Map mngrMap; + + /** + * the partitioning manager + */ + private final PartitioningManager partMan; + + /** + * contains all listeners that are notified when volumes are changed + */ + private final List vcListeners; + + /** + * Creates a new instance of SliceManager + */ + public SliceManager(MRCConfig config) { + + this.config = config; + this.mngrMap = new HashMap(); + this.listeners = new LinkedList(); + this.partMan = new PartitioningManager(this); + this.vcListeners = new LinkedList(); + + if (!config.getDbDir().endsWith("/")) { + dbDir = config.getDbDir() + "/"; + } else { + dbDir = config.getDbDir(); + } + } + + 
/** + * Initializes the slice manager. This causes all meta information about + * volumes and slices to be loaded from disk. + * + * @throws IOException + * @throws ClassNotFoundException + */ + public void init() throws IOException, ClassNotFoundException { + + // check whether a local database exists; if so, load all metadata + // about slices and volumes + File status = new File(dbDir + FILENAME); + if (status.exists()) { + ObjectInputStream ois = new ObjectInputStream(new FileInputStream(status)); + volumesByName = (HashMap) ois.readObject(); + volumesById = (HashMap) ois.readObject(); + ois.close(); + } else { + volumesByName = new HashMap(); + volumesById = new HashMap(); + } + listeners.clear(); + mngrMap.clear(); + + // create the 'slices by ID' mapping on-the-fly + slicesById = new HashMap(); + for (VolumeInfo vol : volumesById.values()) + for (SliceInfo slice : vol.getSlices()) + slicesById.put(slice.sliceID, slice); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "Slices on this server:"); + for (SliceInfo info : slicesById.values()) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "\t" + info.sliceID + " mechanism=" + + info.getReplicationMechanism().getClass().getSimpleName()); + } + } + + /** + * Creates a new volume. 
+ * + * @param volumeId + * the volume ID + * @param volumeName + * the volume name + * @param ownerId + * the owner of the volume + * @param groupId + * the owning group of the volume + * @param fileAccessPolicyId + * the access policy + * @param osdPolicyId + * the OSD selection policy + * @param partitioningPolicyId + * the metadata partitioning policy + * @param registerAtDS + * a flag indicating whether the volume will be registered at the + * directory service + * @param createSlices + * a flag indicating whether the set of initial slices needs to + * be created + * @return a volume info object + * @throws UserException + * @throws IOException + * @throws BackendException + */ + public VolumeInfo createVolume(String volumeId, String volumeName, long fileAccessPolicyId, + long osdPolicyId, String osdPolicyArgs, long partitioningPolicyId, boolean registerAtDS, + boolean createSlices) throws UserException, IOException, BackendException { + + if (volumeName.indexOf('/') != -1 || volumeName.indexOf('\\') != -1) + throw new UserException(ErrNo.EINVAL, "volume name must not contain '/' or '\\'"); + + if (volumesByName.containsKey(volumeName)) + throw new UserException(ErrNo.EEXIST, "volume ' " + volumeName + + "' already exists locally"); + + // create the volume + VolumeInfo volume = new VolumeInfo(volumeId, volumeName, fileAccessPolicyId, osdPolicyId, + partitioningPolicyId, registerAtDS); + volume.setOsdPolicyArgs(osdPolicyArgs); + + volumesByName.put(volumeName, volume); + volumesById.put(volumeId, volume); + + // create the initial slices + if (createSlices) { + for (SliceInfo slice : partMan.getInitialSlices(volumeId)) { + + volume.setSlice(slice.sliceID, slice); + slicesById.put(slice.sliceID, slice); + + for (SliceEventListener l : listeners) + l.event(SliceEventListener.EventType.SLICE_ADDED, slice); + } + } + + notifyVolumeChangeListeners(VolumeChangeListener.MOD_CHANGED, volume); + + return volume; + } + + /** + * Creates a new slice. 
+ * + * @param slice + * slice metadata + * @throws IOException + * @throws BackendException + */ + public void createSlice(SliceInfo slice, boolean sync) throws IOException, BackendException { + + assert (slice.sliceID != null); + + // add the slice to the 'slicesbyVolume' map for a faster retrieval + VolumeInfo volume = volumesById.get(slice.sliceID.getVolumeId()); + if (volume == null) + throw new BackendException("could not find local volume for slice '" + slice.sliceID + + "'"); + + slicesById.put(slice.sliceID, slice); + volume.setSlice(slice.sliceID, slice); + + if (sync) + syncVolumeAndSliceMetadata(); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "created new slice " + slice.sliceID); + + // notify listeners + for (SliceEventListener l : listeners) + l.event(SliceEventListener.EventType.SLICE_ADDED, slice); + + } + + /** + * Deletes a slice. + * + * @param id + * the slice ID + * @throws IOException + */ + public void deleteSlice(SliceID id) throws IOException { + + SliceInfo info = slicesById.get(id); + if (info == null) + return; + + // remove the slice from the map + slicesById.remove(id); + + // remove the slice from the volume + VolumeInfo volume = volumesById.get(info.sliceID.getVolumeId()); + volume.setSlice(id, null); + + // dispose of the slice database + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "deleted slice " + id); + + // notify listeners + for (SliceEventListener l : listeners) + l.event(SliceEventListener.EventType.SLICE_REMOVED, info); + } + + /** + * Modifies a slice. + * + * @param info + * slice metadata + * @throws IOException + */ + public void modifySlice(SliceInfo info) throws IOException { + // slices.put(info.sliceID,info); + + // notify listeners + for (SliceEventListener l : listeners) { + l.event(SliceEventListener.EventType.SLICE_MODIFIED, info); + } + } + + /** + * Returns the metadata of a slice. 
+ * + * @param id + * the slice ID + * @return + */ + public synchronized SliceInfo getSliceInfo(SliceID id) { + SliceInfo info = slicesById.get(id); + + if (info == null) { + Logging.logMessage(Logging.LEVEL_WARN, this, "no info for slice " + id); + } + + return info; + } + + /** + * Registers a new listener for slice events. + * + * @param l + * the listener + */ + public void registerListener(SliceEventListener l) { + synchronized (listeners) { + listeners.add(l); + } + } + + /** + * Unregisters a listener for slice events. + * + * @param l + * the listener + */ + public void unregisterListener(SliceEventListener l) { + synchronized (listeners) { + listeners.remove(l); + } + } + + /** + * Persistently stores all slice and volume metadata. + * + * @throws IOException + */ + private void syncVolumeAndSliceMetadata() throws IOException { + + File dbDirFile = new File(dbDir); + if (!dbDirFile.exists()) + dbDirFile.mkdirs(); + + /** + * first the tmp file is written. If we crash while overwriting the TMP + * file we still have the old .DAT file. If the tmp was written + * successfully we can start overwriting the .DAT file. If we crash + * then, we still have a working copy in TMP. This still requires manual + * intervention but no data is lost. + */ + + FileOutputStream fos = new FileOutputStream(dbDir + TEMP_FILENAME); + ObjectOutputStream oos = new ObjectOutputStream(fos); + oos.writeObject(volumesByName); + oos.writeObject(volumesById); + oos.writeObject(slicesById); + oos.flush(); + fos.getFD().sync(); + oos.close(); + + fos = new FileOutputStream(dbDir + FILENAME); + oos = new ObjectOutputStream(fos); + oos.writeObject(volumesByName); + oos.writeObject(volumesById); + oos.writeObject(slicesById); + oos.flush(); + fos.getFD().sync(); + oos.close(); + } + + /** + * Compacts all slice databases. 
+ * + * @throws BackendException + */ + public void compactDB() throws BackendException { + + try { + + Logging.logMessage(Logging.LEVEL_INFO, this, "creating database checkpoint"); + + // create a new file to indicate that checkpointing is in + // progress (this should be an atomic operation) + new File(dbDir).mkdirs(); + new File(dbDir + CP_LOCK_FILENAME).createNewFile(); + + for (VolumeInfo vol : volumesById.values()) { + + for (SliceInfo info : vol.getSlices()) { + + final File sliceDBDir = new File(dbDir + info.sliceID); + final File sliceBackupDir = new File(dbDir + info.sliceID + ".backup"); + + if (!sliceDBDir.exists()) + sliceDBDir.mkdirs(); + + // create a backup of the slice database + FSUtils.copyTree(sliceDBDir, sliceBackupDir); + + getSliceDB(info.sliceID, '*').sync(); + + info.setLastAvailSqID(info.getCurrentSequenceID()); + } + } + + syncVolumeAndSliceMetadata(); + + } catch (Exception exc) { + throw new BackendException(exc); + } + } + + public void completeDBCompaction() throws BackendException { + + // delete all backup files + File[] backupDirs = new File(dbDir).listFiles(new FilenameFilter() { + public boolean accept(File dir, String name) { + return name.endsWith(".backup"); + } + }); + + for (File dir : backupDirs) + FSUtils.delTree(dir); + + // delete the lock file in order to indicate that checkpointing is + // complete + new File(dbDir + CP_LOCK_FILENAME).delete(); + + Logging.logMessage(Logging.LEVEL_INFO, this, "database checkpointing complete"); + } + + public void restoreDB() { + + File lock = new File(dbDir + CP_LOCK_FILENAME); + + // return if checkpoint has been created w/o problems + if (!lock.exists()) + return; + + // otherwise, something went wrong when checkpointing; in this case, all + // backups need to be restored + File[] backupDirs = new File(dbDir).listFiles(new FilenameFilter() { + public boolean accept(File dir, String name) { + return name.endsWith(".backup"); + } + }); + + for (File dir : backupDirs) { + + final 
String originalDir = dir.getAbsolutePath().substring(0, + dir.getAbsolutePath().length() - ".backup".length()); + final File original = new File(originalDir); + + FSUtils.delTree(original); + dir.renameTo(original); + } + + // finally, delete the checkpointing lock file + lock.delete(); + + } + + /** + * Returns a list of all locally known slices. + * + * @return + */ + public List getSliceList() { + List ids = new LinkedList(slicesById.keySet()); + return ids; + } + + /** + * Returns a list of all locally known slices from a certain volume. + * + * @param volumeId + * the volume ID + * @return + */ + public Collection getSlicesByVolume(String volumeId) { + return volumesById.get(volumeId).getSlices(); + } + + /** + * Checks if an operation can be performed on a slice. + * + * @param modifying + * true if operation writes data, false if read-only operation + */ + protected boolean canPerformOperation(SliceInfo info, boolean modifying) { + if (info == null) + return false; + if (info.getStatus() == SliceInfo.SliceStatus.ONLINE) + return true; + else if ((info.getStatus() == SliceInfo.SliceStatus.READONLY) && !modifying) + return true; + else + return false; + + } + + /** + * Reloads the content of the slice with the given ID from the local + * database. + * + * @param sliceId + * the slice ID + * @throws BackendException + */ + public void reInitSlice(SliceID sliceId) throws BackendException { + StorageManager sm = mngrMap.remove(sliceId); + if (sm != null) + sm.shutdown(); + } + + /** + * Discards all persistent and non-persistent state and invokes + * init. + * + * @throws BackendException + */ + public void reset() throws BackendException { + + try { + closeSliceDBs(); + FSUtils.delTree(new File(dbDir)); + init(); + + } catch (IOException exc) { + throw new BackendException(exc); + } catch (ClassNotFoundException exc) { + throw new BackendException(exc); + } + } + + /** + * Checks whether a volume with the given name is known locally. 
+ * + * @param volumeName + * the volume name + * @return + */ + public boolean hasVolume(String volumeName) { + return volumesByName.containsKey(volumeName); + } + + /** + * Checks whether a volume with the given ID is known locally. + * + * @param volumeId + * the volume ID + * @return + */ + public boolean hasVolumeWithId(String volumeId) { + return volumesById.containsKey(volumeId); + } + + /** + * Returns the metadata for the volume with the given name, if such a volume + * exists locally. + * + * @param volumeName + * the volume name + * @return + */ + public VolumeInfo getVolumeByName(String volumeName) throws UserException { + + VolumeInfo volume = volumesByName.get(volumeName); + if (volume == null) + throw new UserException(ErrNo.ENOENT, "volume '" + volumeName + + "' not found on this MRC"); + + return volume; + } + + /** + * Returns the metadata for the volume with the given ID, if such a volume + * exists locally. + * + * @param volumeId + * the volume name + * @return + */ + public VolumeInfo getVolumeById(String volumeId) throws UserException { + + VolumeInfo volume = volumesById.get(volumeId); + if (volume == null) + throw new UserException(ErrNo.ENOENT, "volume with id " + volumeId + + " not found on this MRC"); + + return volume; + } + + /** + * Returns a list of all locally known volumes. + * + * @return a list of all locally known volumes + */ + public List getVolumes() { + return new ArrayList(volumesById.values()); + } + + /** + * Deletes a volume. 
+ * + * @param volumeName + * the volume name + * @throws UserException + * @throws IOException + */ + public void deleteVolume(String volumeName) throws UserException, IOException, BackendException { + + VolumeInfo volume = volumesByName.get(volumeName); + if (volume == null) + throw new UserException(ErrNo.ENOENT, "volume '" + volumeName + + "' not found on this MRC"); + + for (SliceInfo slice : volume.getSlices()) { + + slicesById.get(slice.sliceID).setDeleted(); + try { + StorageManager sMan = getSliceDB(slice.sliceID, '*'); + mngrMap.remove(slice.sliceID); + sMan.cleanup(); + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_WARN, this, "slice " + slice.sliceID + + " could not be cleaned up"); + } + } + + volumesByName.remove(volumeName); + volumesById.remove(volume.getId()); + } + + /** + * Removes a single slice from the slices index. This method can be invoked + * to dispose of a deleted slice in a deferred fashion, if slice information + * needs to be accessed after the deletion has taken place. + * + * @param sliceId + * the ID of the slice to delete + */ + public void removeSliceFromIndex(SliceID sliceId) { + slicesById.remove(sliceId); + } + + /** + * Returns the slice database that stores the given path. + * + * @param volumeId + * the volume ID + * @param relPath + * the path to the resource within the volume + * @param accessMode + * the internal access mode to the database + * @return + * @throws UserException + * @throws BackendException + */ + public StorageManager getSliceDB(String volumeId, String relPath, char accessMode) + throws UserException, BackendException { + + VolumeInfo volume = volumesById.get(volumeId); + if (volume == null) + throw new UserException("could not find volume '" + volumeId); + + SliceID sliceId = partMan.getSlice(volumeId, relPath); + + return getSliceDB(sliceId, accessMode); + } + + /** + * Returns the slice database that is responsible for the given file ID. 
+ * + * @param volumeId + * the volume ID + * @param fileId + * the file ID + * @param accessMode + * the internal access mode to the database + * @return + * @throws UserException + * @throws BackendException + */ + public StorageManager getSliceDB(String volumeId, long fileId, char accessMode) + throws UserException, BackendException { + + VolumeInfo volume = volumesById.get(volumeId); + if (volume == null) + throw new UserException("could not find volume '" + volumeId); + + SliceID sliceId = partMan.getSlice(volumeId, fileId); + + return getSliceDB(sliceId, accessMode); + } + + /** + * Returns the slice database for the given slice ID. + * + * @param sliceId + * the slice ID + * @param accessMode + * the internal access mode to the database + * @return + * @throws UserException + * @throws BackendException + */ + public StorageManager getSliceDB(SliceID sliceId, char accessMode) throws UserException, + BackendException { + + StorageManager mngr = mngrMap.get(sliceId); + if (mngr != null) + return mngr; + + SliceInfo slice = slicesById.get(sliceId); + if (slice == null) + throw new UserException("could not find slice '" + sliceId); + + // check if the slice is accessible + if (accessMode != '*' && !canPerformOperation(slice, accessMode == 'w')) + throw new UserException("slice '" + sliceId + "' is " + + (accessMode == 'w' ? "read-only or " : "") + "currently unavailable"); + + mngr = new StorageManager(dbDir + "/" + sliceId, sliceId); + mngr.startup(); + + mngrMap.put(sliceId, mngr); + return mngr; + } + + /** + * + * @param volumeID + * @param fileID + * @return true, if the file with the given ID exists, false otherwise. 
+ * @throws UserException + * - if volume does not exist + * @throws BackendException + * - if a backendError occur + */ + public boolean exists(String volumeID, String fileID) throws UserException, BackendException { + // check the volume - if not available throw UserException + VolumeInfo volume = volumesById.get(volumeID); + if (volume == null) + throw new UserException("could not find volume '" + volumeID); + + // get the sliceID - if not available return false. + SliceID sliceId = null; + + sliceId = partMan.getSlice(volumeID, Long.valueOf(fileID)); + + // check sliceID for info objects - if not available return false. + if (slicesById.get(sliceId) == null) + return false; + + // get the responsible StorageManager - if not available return false. + StorageManager mngr = mngrMap.get(sliceId); + if (mngr != null) + return mngr.exists(fileID); + else + return getSliceDB(sliceId, 'r').exists(fileID); + } + + /** + * Syncs all slice databases. + * + * @throws BackendException + */ + public void syncSliceDBs() throws BackendException { + for (StorageManager mngr : mngrMap.values()) + mngr.sync(); + } + + /** + * Closes all slice databases. + * + * @throws BackendException + */ + public void closeSliceDBs() throws BackendException { + for (StorageManager mngr : mngrMap.values()) + mngr.shutdown(); + } + + /** + * Adds a new listener that is notified in response to volume changes. 
+ * + * @param listener + * @throws IOException + * @throws BackendException + */ + public void addVolumeChangeListener(VolumeChangeListener listener) throws IOException, + BackendException { + + vcListeners.add(listener); + + for (VolumeInfo vol : getVolumes()) + notifyVolumeChangeListeners(VolumeChangeListener.MOD_CHANGED, vol); + } + + public void notifyVolumeChangeListeners(int mod, VolumeInfo vol) throws IOException { + + try { + for (VolumeChangeListener listener : vcListeners) + listener.volumeChanged(mod, vol); + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } + } + + /** + * Generates a new globally unique volume ID + * + * @return + * @throws SocketException + */ + public static String generateNewVolumeId() throws SocketException { + return new SliceID(1).getVolumeId(); + } + + public void dumpDB(String dumpFilePath) throws Exception { + + BufferedWriter xmlWriter = new BufferedWriter(new FileWriter(dumpFilePath)); + xmlWriter.write("\n"); + xmlWriter.write("\n"); + + for (VolumeInfo volume : volumesById.values()) { + xmlWriter.write("\n"); + + for (SliceInfo slice : volume.getSlices()) { + xmlWriter.write("\n"); + StorageManager sMan = getSliceDB(slice.sliceID, 'r'); + sMan.dumpDB(xmlWriter); + xmlWriter.write("\n"); + } + + xmlWriter.write("\n"); + } + + xmlWriter.write("\n"); + xmlWriter.close(); + } + + public void restoreDBFromDump(String dumpFilePath) throws Exception { + + // First, check if any volume exists already. If so, deny the operation + // for security reasons. + if (!volumesById.isEmpty()) + throw new Exception( + "Restoring from a dump is only possible on an MRC with no database. 
Please delete the existing MRC database on the server and restart the MRC!"); + + SAXParserFactory spf = SAXParserFactory.newInstance(); + SAXParser sp = spf.newSAXParser(); + sp.parse(new File(dumpFilePath), new DefaultHandler() { + + private StorageManager sMan; + + private RestoreState state; + + private int dbVersion = 1; + + public void startElement(String uri, String localName, String qName, + Attributes attributes) throws SAXException { + + try { + + if (qName.equals("volume")) { + String id = attributes.getValue(attributes.getIndex("id")); + String name = attributes.getValue(attributes.getIndex("name")); + long acPol = Long.parseLong(attributes.getValue(attributes + .getIndex("acPolicy"))); + long osdPol = Long.parseLong(attributes.getValue(attributes + .getIndex("osdPolicy"))); + long partPol = Long.parseLong(attributes.getValue(attributes + .getIndex("partPolicy"))); + String osdPolArgs = attributes.getIndex("osdPolicyArgs") == -1 ? null + : attributes.getValue(attributes.getIndex("osdPolicyArgs")); + + createVolume(id, name, acPol, osdPol, osdPolArgs, partPol, true, false); + } + + else if (qName.equals("slice")) { + SliceID id = new SliceID(attributes.getValue(attributes.getIndex("id"))); + + createSlice(new SliceInfo(id, null), false); + + sMan = getSliceDB(id, '*'); + state = new StorageManager.RestoreState(); + } + + else if (qName.equals("filesystem")) + try { + dbVersion = Integer.parseInt(attributes.getValue(attributes + .getIndex("dbversion"))); + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_WARN, this, + "restoring database with invalid version number"); + } + + else + sMan.restoreDBFromDump(qName, attributes, state, true, dbVersion); + + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_ERROR, this, + "could not restore DB from XML dump: " + exc); + } + } + + public void endElement(String uri, String localName, String qName) throws SAXException { + + try { + if (qName.equals("volume") || qName.equals("slice") + || 
qName.equals("filesystem")) + return; + + sMan.restoreDBFromDump(qName, null, state, false, dbVersion); + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_ERROR, this, + "could not restore DB from XML dump"); + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + } + } + + public void endDocument() throws SAXException { + try { + compactDB(); + completeDBCompaction(); + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_ERROR, this, + "could not restore DB from XML dump"); + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + } + } + + }); + } +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/mrc/slices/VolumeInfo.java b/servers/src/org/xtreemfs/mrc/slices/VolumeInfo.java new file mode 100644 index 0000000000000000000000000000000000000000..d122371f32cd42068c2b0118bdb1c1b44c98a0c2 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/slices/VolumeInfo.java @@ -0,0 +1,155 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.mrc.slices; + +import java.io.Serializable; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import org.xtreemfs.mrc.brain.storage.SliceID; + +/** + * This class stores all immutable metadata about an XtreemFS volume. + * + * Volumes are the top-level structural elements of the file system. Each volume + * has its own directory tree. A volume has a globally unique name and id. + * + * In some cases, it might be required that a volume is not registered at the + * directory service upon creation or system restart. For this purpose, a flag + * 'registerAtDS' exists. At the moment, this feature is used in connection with + * master-slave replication for slave replicas of volumes. + * + * Moreover, a volume holds several immutable policies. The OSD policy + * determines which OSDs may by default be allocated to files. The access + * control policy defines the circumstances under which users are allowed to + * access the volume. The partitioning policy determines how a volume is split + * up into slices. 
+ * + * @author stender + * + */ +public class VolumeInfo implements Serializable { + + private String id; + + private String name; + + private long osdPolicyId; + + private String osdPolicyArgs; + + private long partitioningPolicyId; + + private long acPolicyId; + + private boolean registerAtDS; + + private final Map slices; + + public VolumeInfo(String id, String name, long fileAccessPolicyId, long osdPolicyId, + long partitioningPolicyId, boolean registerAtDS) { + + this.id = id; + this.name = name; + this.acPolicyId = fileAccessPolicyId; + this.osdPolicyId = osdPolicyId; + this.partitioningPolicyId = partitioningPolicyId; + this.registerAtDS = registerAtDS; + this.slices = new HashMap(); + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public long getOsdPolicyId() { + return osdPolicyId; + } + + public void setOsdPolicyId(long osdPolicyID) { + this.osdPolicyId = osdPolicyID; + } + + public String getOsdPolicyArgs() { + return osdPolicyArgs; + } + + public void setOsdPolicyArgs(String osdPolicyArgs) { + this.osdPolicyArgs = osdPolicyArgs; + } + + public long getAcPolicyId() { + return acPolicyId; + } + + public void setAcPolicyId(long fileAccessPolicyId) { + this.acPolicyId = fileAccessPolicyId; + } + + public long getPartitioningPolicyId() { + return partitioningPolicyId; + } + + public void setPartitioningPolicyId(long partitioningPolicyId) { + this.partitioningPolicyId = partitioningPolicyId; + } + + public boolean isRegisterAtDS() { + return registerAtDS; + } + + public void setRegisterAtDS(boolean registerAtDSOnStartup) { + this.registerAtDS = registerAtDSOnStartup; + } + + public SliceInfo getSlice(SliceID sliceId) { + return slices.get(sliceId); + } + + public void setSlice(SliceID sliceId, SliceInfo slice) { + if (slice == null) + slices.remove(sliceId); + else + slices.put(sliceId, 
slice); + } + + public Collection getSlices() { + return slices.values(); + } + +} diff --git a/servers/src/org/xtreemfs/mrc/utils/Converter.java b/servers/src/org/xtreemfs/mrc/utils/Converter.java new file mode 100644 index 0000000000000000000000000000000000000000..efab042bebedc57e7d31c7416348790e40bed110 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/utils/Converter.java @@ -0,0 +1,347 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.utils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.mrc.brain.storage.entities.ACLEntry; +import org.xtreemfs.mrc.brain.storage.entities.AbstractFileEntity; +import org.xtreemfs.mrc.brain.storage.entities.DirEntity; +import org.xtreemfs.mrc.brain.storage.entities.FileAttributeEntity; +import org.xtreemfs.mrc.brain.storage.entities.FileEntity; +import org.xtreemfs.mrc.brain.storage.entities.StripingPolicy; +import org.xtreemfs.mrc.brain.storage.entities.XLocation; +import org.xtreemfs.mrc.brain.storage.entities.XLocationsList; + +/** + * Contains static methods for converting Java objects to JSON-compliant data + * structures and vice versa. + * + * @author stender + * + */ +public class Converter { + + /** + * Converts an ACLEntry array to a mapping: userID:String -> + * rights:Long. + * + * @param acl + * @return + */ + public static Map aclToMap(ACLEntry[] acl) { + + if (acl == null) + return null; + + Map aclMap = new HashMap(); + for (ACLEntry entry : acl) + aclMap.put(entry.getEntity(), entry.getRights()); + + return aclMap; + } + + /** + * Converts a mapping: userID:String -> rights:Long to an + * ACLEntry array sorted by userID. 
+ * + * @param aclMap + * @return + */ + public static ACLEntry[] mapToACL(Map aclMap) { + + if (aclMap == null) + return null; + + ACLEntry[] acl = new ACLEntry[aclMap.size()]; + Iterator keys = aclMap.keySet().iterator(); + for (int i = 0; i < acl.length; i++) { + String userId = keys.next(); + acl[i] = new ACLEntry(userId, (Long) aclMap.get(userId)); + } + + Arrays.sort(acl, new Comparator() { + public int compare(ACLEntry o1, ACLEntry o2) { + return o1.getEntity().compareTo(o2.getEntity()); + } + }); + + return acl; + } + + /** + * Converts an XLocationsList object to a list containing + * X-Locations data. + * + * @param xLocList + * @return + */ + public static List xLocListToList(XLocationsList xLocList) { + + if (xLocList == null) + return null; + + List replicaList = new LinkedList(); + List list = new LinkedList(); + for (XLocation replica : xLocList.getReplicas()) { + + List replicaAsList = new ArrayList(2); + Map policyMap = stripingPolicyToMap(replica + .getStripingPolicy()); + List osdList = stringArrayToList(replica.getOsdList()); + + replicaAsList.add(policyMap); + replicaAsList.add(osdList); + + replicaList.add(replicaAsList); + } + + list.add(replicaList); + list.add(xLocList.getVersion()); + + return list; + } + + /** + * Converts a list containing X-Locations data to an + * XLocationsList object. 
+ * + * @param xLocs + * @return + */ + public static XLocationsList listToXLocList(List list) { + + if (list == null) + return null; + + List xLocs = (List) list.get(0); + + XLocation[] xLocations = new XLocation[xLocs.size()]; + for (int i = 0; i < xLocs.size(); i++) { + + List replicaAsList = (List) xLocs.get(i); + Map policyMap = (Map) replicaAsList + .get(0); + List osdList = (List) replicaAsList.get(1); + + xLocations[i] = new XLocation(mapToStripingPolicy(policyMap), + osdList.toArray(new String[osdList.size()])); + } + + long version = (Long) list.get(1); + + return new XLocationsList(xLocations, version); + } + + /** + * Converts a map containing striping policy information to a + * StripingPolicy object. + * + * @param policyMap + * @return + */ + public static StripingPolicy mapToStripingPolicy( + Map policyMap) { + + if (policyMap == null || policyMap.isEmpty()) + return null; + + StripingPolicy policy = new StripingPolicy((String) policyMap + .get("policy"), (Long) policyMap.get("stripe-size"), + (Long) policyMap.get("width")); + + return policy; + } + + /** + * Converts a StripingPolicy object to a map containing + * striping policy information. + * + * @param policy + * @return + */ + public static Map stripingPolicyToMap(StripingPolicy policy) { + + if (policy == null) + return null; + + Map policyMap = new HashMap(); + policyMap.put("policy", policy.getPolicy()); + policyMap.put("stripe-size", policy.getStripeSize()); + policyMap.put("width", policy.getWidth()); + + return policyMap; + } + + /** + * Converts a String array to a list of Strings. + * + * @param array + * @return + */ + public static List stringArrayToList(String[] array) { + + if (array == null) + return null; + + List list = new ArrayList(array.length); + + for (String s : array) + list.add(s); + + return list; + } + + // /** + // * Converts an entire file tree to a list containing a hierarchically + // * organized representation of all files in the tree. 
+ // * + // * @param sMan + // * @param source + // * @return + // * @throws BrainException + // */ + // public static List fileTreeToList(StorageManager sMan, + // AbstractFileEntity source) throws BrainException { + // + // try { + // + // List attrs = sMan.getAllAttributes(source + // .getId()); + // + // Map file = new HashMap(); + // file.put("atime", source.getAtime()); + // file.put("ctime", source.getCtime()); + // file.put("mtime", source.getMtime()); + // file.put("ownerId", source.getUserId()); + // file.put("groupId", source.getGroupId()); + // file.put("isDirectory", source.isDirectory()); + // file.put("acl", Converter.aclToMap(source.getAcl())); + // file.put("linkCount", source.getLinkCount()); + // + // if (!source.isDirectory()) { + // FileEntity tmp = (FileEntity) source; + // file.put("size", tmp.getSize()); + // file.put("xLocList", Converter.xLocListToList(tmp + // .getXLocationsList())); + // } + // + // List> attributes = new LinkedList>(); + // for (FileAttributeEntity attr : attrs) { + // Map map = new HashMap(); + // map.put("key", attr.getKey()); + // map + // .put( + // "value", + // attr.getValue() instanceof StripingPolicy ? + // stripingPolicyToMap((StripingPolicy) attr + // .getValue()) + // : attr.getValue()); + // map.put("type", attr.getType()); + // map.put("userId", attr.getUserId()); + // attributes.add(map); + // } + // + // List> subElements = new LinkedList>(); + // for (AbstractFileEntity child : + // sMan.getChildData(source.getId()).values()) + // subElements.add(fileTreeToList(sMan, child)); + // + // List node = new LinkedList(); + // node.add(file); + // node.add(attributes); + // node.add(subElements); + // + // return node; + // + // } catch (Exception exc) { + // throw new BrainException(exc); + // } + // } + + /** + * Converts a list of FileAttributeEntitys to a list + * containing maps storing file attribute information. 
+ * + * @param mappedData + * @return + */ + public static List attrMapsToAttrList( + List> mappedData) { + + List list = new LinkedList(); + for (Map attr : mappedData) + list.add(new FileAttributeEntity((String) attr.get("key"), + attr.get("value"), (Long) attr.get("type"), 0, (String) attr + .get("userId"))); + + return list; + } + + /** + * Converts a map containing file or directory metadata to a file or + * directory entity. + * + * @param mappedData + * the mapped file or directory data + * @return a corresponding object that can be stored by the MRC backend + */ + public static AbstractFileEntity mapToFile(Map mappedData) { + + boolean isDirectory = (Boolean) mappedData.get("isDirectory"); + + ACLEntry[] acl = Converter.mapToACL((Map) mappedData + .get("acl")); + + if (isDirectory) + return new DirEntity(0, (String) mappedData.get("ownerId"), + (String) mappedData.get("groupId"), (Long) mappedData + .get("atime"), (Long) mappedData.get("ctime"), + (Long) mappedData.get("mtime"), acl, (Long) mappedData + .get("linkCount")); + else { + + XLocationsList xLocList = Converter + .listToXLocList((List) mappedData.get("xLocList")); + + return new FileEntity(0, (String) mappedData.get("ownerId"), + (String) mappedData.get("groupId"), (Long) mappedData + .get("atime"), (Long) mappedData.get("ctime"), + (Long) mappedData.get("mtime"), (Long) mappedData.get("size"), + xLocList, acl, (Long) mappedData.get("linkCount"), + (Long) mappedData.get("writeEpoch"), (Long) mappedData + .get("truncEpoch")); + } + } + +} diff --git a/servers/src/org/xtreemfs/mrc/utils/MessageUtils.java b/servers/src/org/xtreemfs/mrc/utils/MessageUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..baffc1bf36d06218c2edc2124996df22b9fc5588 --- /dev/null +++ b/servers/src/org/xtreemfs/mrc/utils/MessageUtils.java @@ -0,0 +1,179 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. 
XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.mrc.utils; + +import java.net.InetSocketAddress; +import java.nio.CharBuffer; +import java.util.HashMap; +import java.util.Map; + +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.OutputUtils; +import org.xtreemfs.foundation.json.JSONCharBufferString; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.HTTPUtils.DATA_TYPE; +import org.xtreemfs.mrc.MRCRequest; +import org.xtreemfs.mrc.brain.UserException; + +/** + * Routines for marshalling and unmarshalling JSON in request bodies. 
+ * + * @author bjko + */ +public class MessageUtils { + + public static void marshallResponse(MRCRequest req, Object res) { + + assert (req != null); + + try { + ReusableBuffer bbuf = ReusableBuffer.wrap(JSONParser.writeJSON(res).getBytes( + HTTPUtils.ENC_UTF8)); + req.getPinkyRequest().setResponse(HTTPUtils.SC_OKAY, bbuf, HTTPUtils.DATA_TYPE.JSON); + + } catch (JSONException exc) { + marshallException(req, exc); + } + } + + public static void marshallResponse(MRCRequest req, Object res, HTTPHeaders additionalHeaders) { + + assert (req != null); + + try { + ReusableBuffer bbuf = ReusableBuffer.wrap(JSONParser.writeJSON(res).getBytes( + HTTPUtils.ENC_UTF8)); + req.getPinkyRequest() + .setResponse(HTTPUtils.SC_OKAY, bbuf, HTTPUtils.DATA_TYPE.JSON, + additionalHeaders); + + } catch (JSONException exc) { + marshallException(req, exc); + } + } + + public static void marshallException(MRCRequest req, Map excMap, + boolean userException) { + try { + + ReusableBuffer body = ReusableBuffer.wrap(JSONParser.writeJSON(excMap).getBytes( + HTTPUtils.ENC_UTF8)); + + req.getPinkyRequest().setResponse(userException ? 
HTTPUtils.SC_USER_EXCEPTION + : HTTPUtils.SC_SERVER_ERROR, body, HTTPUtils.DATA_TYPE.JSON); + } catch (JSONException ex) { + Logging.logMessage(Logging.LEVEL_DEBUG, null, "cannot marshall exception"); + Logging.logMessage(Logging.LEVEL_ERROR, null, ex); + req.getPinkyRequest().setResponse(HTTPUtils.SC_SERVER_ERROR); + } + } + + public static void marshallException(MRCRequest req, Throwable exc) { + + String stackTrace = null; + + // encapsulate the stack trace in a string, unless the exception is a + // user exception + if (!(exc instanceof UserException)) + stackTrace = OutputUtils.stackTraceToString(exc); + + Map excMap = new HashMap(); + excMap.put("exceptionName", exc.toString()); + excMap.put("errorMessage", exc.getMessage()); + excMap.put("stackTrace", stackTrace); + if (exc instanceof UserException) + excMap.put("errno", ((UserException) exc).getErrno()); + + marshallException(req, excMap, exc instanceof UserException); + } + + public static void setRedirect(MRCRequest req, String target) { + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_LOCATION); + req.getPinkyRequest().setResponse(HTTPUtils.SC_SEE_OTHER, null, DATA_TYPE.JSON, headers); + } + + public static Object unmarshallRequestOld(MRCRequest request) throws JSONException { + String body = null; + + assert (request != null); + assert (request.getPinkyRequest() != null); + + if (request.getPinkyRequest().requestBody != null) { + byte bdy[] = null; + if (request.getPinkyRequest().requestBody.hasArray()) { + bdy = request.getPinkyRequest().requestBody.array(); + } else { + bdy = new byte[request.getPinkyRequest().requestBody.capacity()]; + request.getPinkyRequest().requestBody.position(0); + request.getPinkyRequest().requestBody.get(bdy); + } + + body = new String(bdy, HTTPUtils.ENC_UTF8); + return JSONParser.parseJSON(new JSONString(body)); + } else { + return null; + } + + } + + public static Object unmarshallRequest(MRCRequest request) throws JSONException { + String body 
= null; + + assert (request != null); + assert (request.getPinkyRequest() != null); + + if (request.getPinkyRequest().requestBody != null) { + request.getPinkyRequest().requestBody.position(0); + CharBuffer utf8buf = HTTPUtils.ENC_UTF8.decode(request.getPinkyRequest().requestBody.getBuffer()); + return JSONParser.parseJSON(new JSONCharBufferString(utf8buf)); + } else { + return null; + } + + } + + public static InetSocketAddress addrFromString(String hostAndPort) + throws IllegalArgumentException { + int dpoint = hostAndPort.lastIndexOf(':'); + if (dpoint == -1) { + throw new IllegalArgumentException("InetSocketAddress as String needs a : character"); + } + String host = hostAndPort.substring(0, dpoint); + int port = 0; + try { + port = Integer.valueOf(hostAndPort.substring(dpoint + 1)); + } catch (NumberFormatException ex) { + throw new IllegalArgumentException("Port is not a number in " + hostAndPort); + } + return new InetSocketAddress(host, port); + } + +} diff --git a/servers/src/org/xtreemfs/osd/ErrorCodes.java b/servers/src/org/xtreemfs/osd/ErrorCodes.java new file mode 100644 index 0000000000000000000000000000000000000000..d5be15a0e860f1ca51ba7f67f901e0d761d39515 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ErrorCodes.java @@ -0,0 +1,92 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.osd; + + +public final class ErrorCodes { + + /** + * the fileID is malformed or contains invalid characters + */ + public static final int INVALID_FILEID = 1; + + /** + * a header field is malformed or contains invalid values + */ + public static final int INVALID_HEADER = 2; + + /** + * the RPC request data is not valid + */ + public static final int INVALID_RPC = 3; + + /** + * HTTP or RPC method is not implemented + */ + public static final int METHOD_NOT_IMPLEMENTED = 4; + + /** + * the parameter count or type does not match for an RPC. + */ + public static final int INVALID_PARAMS = 5; + + /** + * this error code indicates that the server needs the full XLocation list + * instead of the XLocation version number only. + */ + public static final int NEED_FULL_XLOC = 10; + + /** + * the XLocation list sent by the client is outdated and not accepted. + */ + public static final int XLOC_OUTDATED = 11; + + /** + * this server is not part of the XLocation list. + */ + public static final int NOT_IN_XLOC = 12; + + /** + * authentication failed. + */ + public static final int AUTH_FAILED = 13; + + /** + * checksum of an object turned out to be invalid + */ + public static final int INVALID_CHECKSUM = 20; + + /** + * the client is not the owner of the lease. + */ + public static final int NOT_LEASE_OWNER = 30; + + /** + * The lease has timed out (i.e. the timeout sent in the X-Lease-Timeout + * header has passed). 
+ */ + public static final int LEASE_TIMED_OUT = 31; + +} diff --git a/servers/src/org/xtreemfs/osd/ErrorRecord.java b/servers/src/org/xtreemfs/osd/ErrorRecord.java new file mode 100644 index 0000000000000000000000000000000000000000..71dcc2f9b0b6df058a441ed943b282350abbc609 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ErrorRecord.java @@ -0,0 +1,118 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.osd; + +import org.xtreemfs.common.util.OutputUtils; + +/** + * Encapsuls all types of errors. 
+ */ +public final class ErrorRecord { + + public enum ErrorClass { + /** + * any internal server error condition + */ + INTERNAL_SERVER_ERROR, + /** + * indicates a malformed request + */ + BAD_REQUEST, + /** + * a user exception for which a defined error code exists in the + * protocol + */ + USER_EXCEPTION, + /** + * redirect to another server + */ + REDIRECT + } + + /** + * error type + */ + private final ErrorClass errorClass; + + /** + * error code (as defined in the protocol) + */ + private final int errorCode; + + /** + * an error message, can include a stack trace + */ + private final String errorMessage; + + /** + * the throwable thrown in the service that caused the error + */ + private final Throwable throwable; + + public ErrorRecord(ErrorClass errorClass, String errorMessage) { + this(errorClass, 0, errorMessage, null); + } + + public ErrorRecord(ErrorClass errorClass, String errorMessage, Throwable throwable) { + this(errorClass, 0, errorMessage, throwable); + } + + public ErrorRecord(ErrorClass errorClass, int errorCode, String errorMessage) { + this(errorClass, errorCode, errorMessage, null); + } + + public ErrorRecord(ErrorClass errorClass, int errorCode, String errorMessage, + Throwable throwable) { + this.errorCode = errorCode; + this.errorMessage = errorMessage; + this.errorClass = errorClass; + this.throwable = throwable; + } + + public int getErrorCode() { + return errorCode; + } + + public String getErrorMessage() { + return errorMessage; + } + + public ErrorClass getErrorClass() { + return errorClass; + } + + public String toString() { + + String stackTrace = OutputUtils.stackTraceToString(throwable); + return this.errorClass + "." + this.errorCode + ":" + this.errorMessage + + (stackTrace == null ? 
"" : ", caused by: " + stackTrace); + } + + public String toJSON() { + return "[ \"error-code\": " + this.errorCode + ", \"error-message\" : \"" + + (this.errorMessage.replace("\"", "\\\"")) + "\" ]"; + } + +} diff --git a/servers/src/org/xtreemfs/osd/LocationsCache.java b/servers/src/org/xtreemfs/osd/LocationsCache.java new file mode 100644 index 0000000000000000000000000000000000000000..c1c8d8739d595901cd7088fa51120f9afaf0591b --- /dev/null +++ b/servers/src/org/xtreemfs/osd/LocationsCache.java @@ -0,0 +1,88 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion + and Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC) + * Eugenio Cesario (CNR) + */ + +package org.xtreemfs.osd; + +import java.util.Map; +import org.xtreemfs.common.LRUCache; +import org.xtreemfs.common.striping.Locations; + +/** + * This class implements a cache for Locations + * + * @author jmalo + */ +public class LocationsCache { + + private Map cache; + private final int maximumSize; + + /** + * Creates a new instance of LocationsCache + * @param size Maximum number of entries to store + */ + public LocationsCache(int size) { + maximumSize = size; + cache = new LRUCache(maximumSize); + } + + /** + * It gets the existing version number in cache of the locations of a file + * @param fileId File to look for inside the cache + * @return The version number of the stored locations or 0 if the locations are not cached. + */ + public long getVersion(String fileId) { + Locations loc = cache.get(fileId); + return (loc != null)?loc.getVersion():0; + } + + /** + * It updates the existing entry of a file with a new locations + * @param fileId File refered by the locations + * @param updatedLoc New locations for the file + */ + public void update(String fileId, Locations updatedLoc) { + cache.put(fileId, updatedLoc); + } + + /* + * It gets the existing cached Locations of a file + * @param fileId File referred to the requested Locations + * @return The existing cached Locations or null if there isn't any Locations related to fileId + */ + public Locations getLocations(String fileId) { + return cache.get(fileId); + } + + public void removeLocations(String fileId) { + cache.remove(fileId); + } + + + + +} diff --git a/servers/src/org/xtreemfs/osd/OSD.java b/servers/src/org/xtreemfs/osd/OSD.java new file mode 100644 index 0000000000000000000000000000000000000000..99db42a83a7e7a3de427d6684123af7111336cd0 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/OSD.java @@ -0,0 +1,113 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. 
+ + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.osd; + +import org.xtreemfs.common.logging.Logging; + +public class OSD { + + private OSDRequestDispatcher dispatcher; + + /** + * Creates a new instance of Main + */ + public OSD(OSDConfig config) { + + Logging + .logMessage(Logging.LEVEL_INFO, null, "JAVA_HOME=" + + System.getProperty("java.home")); + Logging.logMessage(Logging.LEVEL_INFO, null, "UUID: " + config.getUUID()); + + try { + // FIXME: pass UUID + useDIR + dispatcher = new OSDRequestDispatcher(config); + dispatcher.start(); + + final OSDRequestDispatcher ctrl = dispatcher; + + Runtime.getRuntime().addShutdownHook(new Thread() { + @Override + public void run() { + try { + + Logging.logMessage(Logging.LEVEL_INFO, this, "received shutdown signal!"); + + ctrl.heartbeatThread.shutdown(); + // FIXME: provide a solution that does not attempt to + // shut down an OSD that is already being shut down due + // to an error + // ctrl.shutdown(); + + Logging.logMessage(Logging.LEVEL_INFO, this, "OSD shutdown complete"); + + } catch (Exception ex) { + ex.printStackTrace(); + } + } + }); 
+ + } catch (Exception ex) { + + Logging.logMessage(Logging.LEVEL_DEBUG, null, + "System could not start up due to an exception. Aborted."); + Logging.logMessage(Logging.LEVEL_ERROR, null, ex); + + if (dispatcher != null) + try { + dispatcher.shutdown(); + } catch (Exception e) { + Logging.logMessage(Logging.LEVEL_ERROR, config.getUUID(), + "could not shutdown MRC: "); + Logging.logMessage(Logging.LEVEL_ERROR, config.getUUID(), e); + } + } + } + + public void shutdown() { + dispatcher.shutdown(); + } + + public OSDRequestDispatcher getDispatcher() { + return dispatcher; + } + + /** + * Main routine + * + * @param args + * the command line arguments + */ + public static void main(String[] args) throws Exception { + + Thread.currentThread().setName("OSD"); + + String cfgFile = (args.length > 0) ? args[0] : "../config/osdconfig.properties"; + OSDConfig config = new OSDConfig(cfgFile); + + Logging.start(config.getDebugLevel()); + new OSD(config); + }; + +} diff --git a/servers/src/org/xtreemfs/osd/OSDConfig.java b/servers/src/org/xtreemfs/osd/OSDConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..5e43d57faa044f1735013f2b1aa25380e4366fb5 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/OSDConfig.java @@ -0,0 +1,165 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion + and Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC) + * Eugenio Cesario (CNR) + */ + +package org.xtreemfs.osd; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Properties; + +import org.xtreemfs.common.config.ServiceConfig; +import org.xtreemfs.common.uuids.ServiceUUID; + +/** + * + * @author bjko + */ +public class OSDConfig extends ServiceConfig { + + public static final int CHECKSUM_NONE = 0; + + public static final int CHECKSUM_ADLER32 = 1; + + public static final int CHECKSUM_CRC32 = 2; + + private InetSocketAddress directoryService; + + private ServiceUUID uuid; + + private int localClockRenew; + + private int remoteTimeSync; + + private String objDir; + + private boolean reportFreeSpace; + + private boolean basicStatsEnabled; + + private boolean measureRqsEnabled; + + private boolean useChecksums; + + private String checksumProvider; + + private String capabilitySecret; + + /** Creates a new instance of OSDConfig */ + public OSDConfig(String filename) throws IOException { + super(filename); + read(); + } + + public OSDConfig(Properties prop) throws IOException { + super(prop); + read(); + } + + public void read() throws IOException { + super.read(); + + this.directoryService = this.readRequiredInetAddr("dir_service.host", "dir_service.port"); + + this.objDir = this.readRequiredString("object_dir"); + + this.localClockRenew = this.readRequiredInt("local_clock_renewal"); + + this.remoteTimeSync = this.readRequiredInt("remote_time_sync"); + + this.uuid = new ServiceUUID(this.readRequiredString("uuid")); + + this.reportFreeSpace = 
this.readRequiredBoolean("report_free_space"); + + this.setMeasureRqsEnabled(this.readOptionalBoolean("measure_requests", false)); + + this.setBasicStatsEnabled(this.readOptionalBoolean("basic_statistics", false)); + + this.useChecksums = this.readRequiredBoolean("checksums.enabled"); + + this.checksumProvider = useChecksums ? this.readOptionalString("checksums.algorithm", null) + : null; + + this.capabilitySecret = this.readRequiredString("capability_secret"); + } + + public InetSocketAddress getDirectoryService() { + return directoryService; + } + + public String getObjDir() { + return objDir; + } + + public int getLocalClockRenew() { + return localClockRenew; + } + + public int getRemoteTimeSync() { + return remoteTimeSync; + } + + public ServiceUUID getUUID() { + return uuid; + } + + public boolean isReportFreeSpace() { + return reportFreeSpace; + } + + public void setReportFreeSpace(boolean reportFreeSpace) { + this.reportFreeSpace = reportFreeSpace; + } + + public boolean isBasicStatsEnabled() { + return basicStatsEnabled; + } + + public void setBasicStatsEnabled(boolean basicStatsEnabled) { + this.basicStatsEnabled = basicStatsEnabled; + } + + public boolean isMeasureRqsEnabled() { + return measureRqsEnabled; + } + + public void setMeasureRqsEnabled(boolean measureRqsEnabled) { + this.measureRqsEnabled = measureRqsEnabled; + } + + public String getChecksumProvider() { + return checksumProvider; + } + + public boolean isUseChecksums() { + return useChecksums; + } + + public String getCapabilitySecret() { + return capabilitySecret; + } + +} diff --git a/servers/src/org/xtreemfs/osd/OSDException.java b/servers/src/org/xtreemfs/osd/OSDException.java new file mode 100644 index 0000000000000000000000000000000000000000..195b7d719b6ed1c0b28a41cbfdc29633a5556fe5 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/OSDException.java @@ -0,0 +1,53 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. 
XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.osd; + +import java.io.IOException; + +import org.xtreemfs.osd.ErrorRecord.ErrorClass; + +/** + * Encapsulates information about an error that occurs while processing a + * request in the OSD. + */ +public class OSDException extends IOException { + + private ErrorRecord err; + + public OSDException(ErrorClass errorClass, String errorMessage) { + super(errorMessage); + err = new ErrorRecord(errorClass, errorMessage); + } + + public OSDException(ErrorClass errorClass, int errorCode, String errorMessage) { + super(errorMessage); + err = new ErrorRecord(errorClass, errorCode, errorMessage); + } + + public ErrorRecord getErrorRecord() { + return err; + } + +} diff --git a/servers/src/org/xtreemfs/osd/OSDRequest.java b/servers/src/org/xtreemfs/osd/OSDRequest.java new file mode 100644 index 0000000000000000000000000000000000000000..e0bf482ba69c58bcbe15686ec0c0fb73cd39f5ce --- /dev/null +++ b/servers/src/org/xtreemfs/osd/OSDRequest.java @@ -0,0 +1,186 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. 
XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.osd; + +import org.xtreemfs.common.Request; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.osd.ops.Operation; +import org.xtreemfs.osd.stages.StageCallbackInterface; + +/** + * Request object. + * + * @author bjko + */ +public final class OSDRequest extends Request { + + /** + * Request type + */ + public enum Type { + READ, WRITE, DELETE, RPC, INTERNAL_EVENT, STATUS_PAGE + }; + + /** + * Request operation which contains state machine. + */ + private Operation operation; + + /** + * The callback, which is registered for the actual state of the operation. + */ + private StageCallbackInterface currentCallback; + + /** + * request data (e.g. object data) + */ + private ReusableBuffer data; + + /** + * Data content type (text/binary) + */ + private HTTPUtils.DATA_TYPE dataType; + + /** + * current state of the request (see operation) + */ + private int state; + + /** + * request type + */ + private Type type; + + /** + * request details (e.g. 
for an OSD) + */ + private final RequestDetails details; + + /** + * original osdRequest (used for "suboperations") + */ + private OSDRequest originalOsdRequest; + + /** + * list of sub-requests sent via Speedy. + */ + private SpeedyRequest[] httpRequests; + + public OSDRequest(long requestId) { + super(null); + this.requestId = requestId; + details = new RequestDetails(); + } + + public RequestDetails getDetails() { + return details; + } + + public int getState() { + return state; + } + + public void setState(int state) { + this.state = state; + } + + public Type getType() { + return type; + } + + public void setType(Type type) { + this.type = type; + } + + public Operation getOperation() { + return operation; + } + + public void setOperation(Operation operation) { + this.operation = operation; + } + + public ReusableBuffer getData() { + return data; + } + + public HTTPUtils.DATA_TYPE getDataType() { + return this.dataType; + } + + public void setData(ReusableBuffer data, HTTPUtils.DATA_TYPE dataType) { + this.data = data; + this.dataType = dataType; + } + + public SpeedyRequest[] getHttpRequests() { + return httpRequests; + } + + public void setHttpRequests(SpeedyRequest[] httpRequests) { + this.httpRequests = httpRequests; + } + + public String toString() { + + return getClass().getCanonicalName() + + " #" + + this.requestId + + "\n" + + "Operation " + + ((this.operation == null) ? "not yet parsed" : this.operation + .getClass().getCanonicalName()) + + "\n" + + "state " + + this.state + + "\n" + + "error " + + this.error + + "\n" + + "data " + + ((data == null) ? 
"null" : data.limit() + " bytes / " + + dataType); + + } + + public StageCallbackInterface getCurrentCallback() { + return this.currentCallback; + } + + public void setCurrentCallback(StageCallbackInterface actualCallback) { + this.currentCallback = actualCallback; + } + + public OSDRequest getOriginalOsdRequest() { + return this.originalOsdRequest; + } + + public void setOriginalOsdRequest(OSDRequest osdRequest) { + this.originalOsdRequest = osdRequest; + } + +} diff --git a/servers/src/org/xtreemfs/osd/OSDRequestDispatcher.java b/servers/src/org/xtreemfs/osd/OSDRequestDispatcher.java new file mode 100644 index 0000000000000000000000000000000000000000..022d1517cc39e073db050aae1b5fbfce8d59ecbd --- /dev/null +++ b/servers/src/org/xtreemfs/osd/OSDRequestDispatcher.java @@ -0,0 +1,543 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.osd; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.management.OperatingSystemMXBean; +import java.net.InetSocketAddress; +import java.util.HashMap; +import java.util.Map; + +import org.xtreemfs.common.HeartbeatThread; +import org.xtreemfs.common.Request; +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.VersionManagement; +import org.xtreemfs.common.HeartbeatThread.ServiceDataGenerator; +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.checksums.ChecksumFactory; +import org.xtreemfs.common.checksums.provider.JavaChecksumProvider; +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.trace.Tracer; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.common.uuids.UUIDResolver; +import org.xtreemfs.foundation.LifeCycleListener; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.pinky.PinkyRequestListener; +import org.xtreemfs.foundation.pinky.PipelinedPinky; +import org.xtreemfs.foundation.pinky.SSLOptions; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.osd.ops.AcquireLease; +import org.xtreemfs.osd.ops.CheckObjectRPC; +import org.xtreemfs.osd.ops.CleanUpOperation; +import org.xtreemfs.osd.ops.CloseFileEvent; +import 
org.xtreemfs.osd.ops.DeleteLocalRPC; +import org.xtreemfs.osd.ops.DeleteOFTRPC; +import org.xtreemfs.osd.ops.DeleteOperation; +import org.xtreemfs.osd.ops.FetchGmaxRPC; +import org.xtreemfs.osd.ops.GetProtocolVersionOperation; +import org.xtreemfs.osd.ops.GetStatistics; +import org.xtreemfs.osd.ops.GmaxEvent; +import org.xtreemfs.osd.ops.Operation; +import org.xtreemfs.osd.ops.ReadOperation; +import org.xtreemfs.osd.ops.ReturnLease; +import org.xtreemfs.osd.ops.ShutdownOperation; +import org.xtreemfs.osd.ops.StatisticsConfig; +import org.xtreemfs.osd.ops.StatusPageOperation; +import org.xtreemfs.osd.ops.TruncateLocalRPC; +import org.xtreemfs.osd.ops.TruncateRPC; +import org.xtreemfs.osd.ops.WriteOperation; +import org.xtreemfs.osd.ops.FetchAndWriteReplica; +import org.xtreemfs.osd.stages.AuthenticationStage; +import org.xtreemfs.osd.stages.DeletionStage; +import org.xtreemfs.osd.stages.ParserStage; +import org.xtreemfs.osd.stages.ReplicationStage; +import org.xtreemfs.osd.stages.Stage; +import org.xtreemfs.osd.stages.StageStatistics; +import org.xtreemfs.osd.stages.StatisticsStage; +import org.xtreemfs.osd.stages.StorageStage; +import org.xtreemfs.osd.storage.HashStorageLayout; +import org.xtreemfs.osd.storage.MetadataCache; +import org.xtreemfs.osd.storage.StorageLayout; +import org.xtreemfs.osd.storage.Striping; + +public class OSDRequestDispatcher implements RequestDispatcher, PinkyRequestListener, + LifeCycleListener, UDPReceiverInterface { + + protected final Stage[] stages; + + protected final Operation[] operations; + + protected final UDPCommunicator udpCom; + + protected final PipelinedPinky pinky; + + protected final MultiSpeedy speedy; + + protected final HeartbeatThread heartbeatThread; + + protected final OSDConfig config; + + protected final StageStatistics statistics; + + protected final DIRClient dirClient; + + protected long requestId; + + protected String authString; + + public OSDRequestDispatcher(OSDConfig config) throws IOException, 
JSONException { + + this.config = config; + assert (config.getUUID() != null); + + if (Tracer.COLLECT_TRACES) { + Tracer.initialize("/tmp/OSD_" + config.getUUID() + ".trace"); + } + + // generate an authorization string for Directory Service operations + authString = NullAuthProvider.createAuthString(config.getUUID().toString(), config + .getUUID().toString()); + + // initialize the checksum factory + ChecksumFactory.getInstance().addProvider(new JavaChecksumProvider()); + + // --------------------- + // initialize operations + // --------------------- + + // IMPORTANT: the order of operations defined in + // 'RequestDispatcher.Operations' has to be preserved! + operations = new Operation[] { new ReadOperation(this), new WriteOperation(this), + new StatusPageOperation(this), new FetchGmaxRPC(this), new TruncateRPC(this), + new TruncateLocalRPC(this), new DeleteOperation(this), new DeleteOFTRPC(this), + new DeleteLocalRPC(this), new GetProtocolVersionOperation(this), + new ShutdownOperation(this), new CheckObjectRPC(this), new GmaxEvent(this), + new CloseFileEvent(this), new GetStatistics(this), new StatisticsConfig(this), + new AcquireLease(this), new ReturnLease(this), new CleanUpOperation(this), + new FetchAndWriteReplica(this) }; + + // ------------------------------- + // initialize communication stages + // ------------------------------- + + pinky = config.isUsingSSL() ? new PipelinedPinky(config.getPort(), config.getAddress(), + this, new SSLOptions(config.getServiceCredsFile(), config.getServiceCredsPassphrase(), + config.getServiceCredsContainer(), config.getTrustedCertsFile(), config + .getTrustedCertsPassphrase(), config.getTrustedCertsContainer(), false)) + : new PipelinedPinky(config.getPort(), config.getAddress(), this); + pinky.setLifeCycleListener(this); + + speedy = config.isUsingSSL() ? 
new MultiSpeedy(new SSLOptions(config + .getServiceCredsFile(), config.getServiceCredsPassphrase(), config + .getServiceCredsContainer(), config.getTrustedCertsFile(), config + .getTrustedCertsPassphrase(), config.getTrustedCertsContainer(), false)) + : new MultiSpeedy(); + speedy.setLifeCycleListener(this); + + udpCom = new UDPCommunicator(config.getPort(), this); + udpCom.setLifeCycleListener(this); + + // -------------------------- + // initialize internal stages + // -------------------------- + + MetadataCache metadataCache = new MetadataCache(); + StorageLayout storageLayout = new HashStorageLayout(config, metadataCache); + + // TODO: use UUID resolution instead + Striping striping = new Striping(config.getUUID(), metadataCache); + + statistics = new StageStatistics(); + + // IMPORTANT: the order of stages defined in 'RequestDispatcher.Stages' + // has to be preserved! + stages = new Stage[] { new ParserStage(this), new AuthenticationStage(this), + new StorageStage(this, striping, metadataCache, storageLayout, 1), + new DeletionStage(this, striping, metadataCache, storageLayout), + new StatisticsStage(this, statistics, 60 * 10), new ReplicationStage(this) }; + + for (Stage stage : stages) + stage.setLifeCycleListener(this); + + // ---------------------------------------- + // initialize TimeSync and Heartbeat thread + // ---------------------------------------- + + dirClient = new DIRClient(speedy, config.getDirectoryService()); + + TimeSync.initialize(dirClient, config.getRemoteTimeSync(), config.getLocalClockRenew(), + authString); + UUIDResolver.start(dirClient, 10 * 1000, 600 * 1000); + UUIDResolver.addLocalMapping(config.getUUID(), config.getPort(), config.isUsingSSL()); + + ServiceDataGenerator gen = new ServiceDataGenerator() { + public Map> getServiceData() { + + OSDConfig config = OSDRequestDispatcher.this.config; + String freeSpace = "0"; + + if (config.isReportFreeSpace()) { + freeSpace = String.valueOf(FSUtils.getFreeSpace(config.getObjDir())); + 
} + + String totalSpace = "-1"; + + try { + File f = new File(config.getObjDir()); + totalSpace = String.valueOf(f.getTotalSpace()); + } catch (Exception ex) { + } + + OperatingSystemMXBean osb = ManagementFactory.getOperatingSystemMXBean(); + String load = String.valueOf((int) (osb.getSystemLoadAverage() * 100 / osb + .getAvailableProcessors())); + + long totalRAM = Runtime.getRuntime().maxMemory(); + long usedRAM = Runtime.getRuntime().totalMemory() + - Runtime.getRuntime().freeMemory(); + + Map> data = new HashMap>(); + data.put(config.getUUID().toString(), RPCClient.generateMap("type", "OSD", "free", + freeSpace, "total", totalSpace, "load", load, "prot_versions", + VersionManagement.getSupportedProtVersAsString(), "totalRAM", Long + .toString(totalRAM), "usedRAM", Long.toString(usedRAM), + "geoCoordinates", config.getGeoCoordinates())); + return data; + } + }; + heartbeatThread = new HeartbeatThread("OSD HB Thr", dirClient, config.getUUID(), gen, + authString,config); + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "OSD at " + this.config.getUUID() + + " ready"); + } + + public void start() { + + try { + + pinky.start(); + speedy.start(); + udpCom.start(); + + pinky.waitForStartup(); + speedy.waitForStartup(); + udpCom.waitForStartup(); + + TimeSync.initialize(new DIRClient(speedy, new InetSocketAddress("localhost", 32638)), + 60000, 50, authString); + + for (Stage stage : stages) + stage.start(); + + for (Stage stage : stages) + stage.waitForStartup(); + + heartbeatThread.start(); + heartbeatThread.waitForStartup(); + + Logging.logMessage(Logging.LEVEL_INFO, this, + "RequestController and all services operational"); + + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, "startup failed"); + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + System.exit(1); + } + + } + + public void shutdown() { + + try { + + // create status page snapshot for debugging purposes + try { + String statusPageSnapshot = 
((StatusPageOperation) getOperation(Operations.STATUS_PAGE)) + .printStatusPage(); + BufferedWriter writer = new BufferedWriter(new FileWriter(config.getObjDir() + + "/.status.html")); + writer.write(statusPageSnapshot); + writer.close(); + } catch (Exception exc) { + // ignore + } + + heartbeatThread.shutdown(); + heartbeatThread.waitForShutdown(); + + UUIDResolver.shutdown(); + + pinky.shutdown(); + speedy.shutdown(); + udpCom.shutdown(); + + for (Stage stage : stages) + stage.shutdown(); + + pinky.waitForShutdown(); + try { + speedy.waitForShutdown(); + } catch (Exception exc) { + // FIXME: workaround to protect the system from crashing if an + // error occurs during speedy shutdown. A proper better error + // handling is the better solution here! + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + } + udpCom.waitForShutdown(); + + for (Stage stage : stages) + stage.waitForShutdown(); + + Logging.logMessage(Logging.LEVEL_INFO, this, "OSD and all stages terminated"); + + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, "shutdown failed"); + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + } + } + + public void receiveRequest(PinkyRequest theRequest) { + + /* + * if (Logging.tracingEnabled()) Logging.logMessage(Logging.LEVEL_DEBUG, + * this, "received request: " + theRequest.requestMethod + " #" + + * requestId); + */ + if (Tracer.COLLECT_TRACES) { + Tracer.trace(theRequest.requestHeaders.getHeader(HTTPHeaders.HDR_XREQUESTID), + requestId, Tracer.TraceEvent.RECEIVED, null, null); + } + + OSDRequest rq = new OSDRequest(requestId++); + rq.setPinkyRequest(theRequest); + + if (Logging.tracingEnabled()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "proessing " + rq); + + stages[RequestDispatcher.Stages.PARSER.ordinal()].enqueueOperation(rq, + ParserStage.STAGEOP_PARSE, null); + } + + public void sendSpeedyRequest(Request originalRequest, SpeedyRequest speedyRq, + InetSocketAddress server) throws IOException { + + 
speedyRq.setOriginalRequest(originalRequest); + speedy.sendRequest(speedyRq, server); + + } + + public void requestFinished(OSDRequest rq) { + + assert (rq != null); + + final PinkyRequest pr = rq.getPinkyRequest(); + assert (pr != null); + + if (!pr.responseSet) { + if (rq.getError() == null) { + + HTTPHeaders headers = new HTTPHeaders(); + + String fsUpdate = rq.getDetails().getNewFSandEpoch(); + if (fsUpdate != null) + headers.addHeader(HTTPHeaders.HDR_XNEWFILESIZE, fsUpdate); + + // if the checksum was invalid when reading data, return the + // data but add a header indicating a wrong checksum to the + // client + if (rq.getDetails().isInvalidChecksum()) + headers.addHeader(HTTPHeaders.HDR_XINVALIDCHECKSUM, "true"); + + final String rqId = rq.getDetails().getRequestId(); + if (rqId != null) + headers.addHeader(HTTPHeaders.HDR_XREQUESTID, rqId); + + if (rq.getData() != null) { + + String mimeType = null; + + switch (rq.getDataType()) { + case BINARY: + mimeType = HTTPUtils.BIN_TYPE; + break; + case JSON: + mimeType = HTTPUtils.JSON_TYPE; + break; + case HTML: + mimeType = HTTPUtils.HTML_TYPE; + break; + } + + headers.addHeader(HTTPHeaders.HDR_CONTENT_TYPE, mimeType); + } + + if (Tracer.COLLECT_TRACES) + Tracer.trace(rqId, rq.getRequestId(), Tracer.TraceEvent.RESPONSE_SENT, null, + null); + + pr.setResponse(HTTPUtils.SC_OKAY, rq.getData(), HTTPUtils.DATA_TYPE.JSON, headers); + + } else { + + final ErrorRecord error = rq.getError(); + switch (error.getErrorClass()) { + case INTERNAL_SERVER_ERROR: { + pr.setResponse(HTTPUtils.SC_SERVER_ERROR, error.getErrorMessage() + "\n\n"); + break; + } + case USER_EXCEPTION: { + pr.setResponse(HTTPUtils.SC_USER_EXCEPTION, error.toJSON()); + break; + } + case REDIRECT: { + pr.setResponse(HTTPUtils.SC_SEE_OTHER, error.getErrorMessage()); + break; + } + default: { + pr.setResponse(HTTPUtils.SC_SERVER_ERROR, + "an unknown error type was returned: " + error); + break; + } + } + + if (rq.getData() != null) + 
BufferPool.free(rq.getData()); + + if (Tracer.COLLECT_TRACES) + Tracer.trace(rq.getDetails().getRequestId(), rq.getRequestId(), + Tracer.TraceEvent.ERROR_SENT, null, error.getErrorClass().toString()); + } + } + pinky.sendResponse(pr); + + } + + public OSDConfig getConfig() { + return config; + } + + public int getPinkyCons() { + return pinky.getNumConnections(); + } + + public int getPinkyQueueLength() { + return pinky.getTotalQLength(); + } + + public Stage getStage(RequestDispatcher.Stages stage) { + return stages[stage.ordinal()]; + } + + public Operation getOperation(RequestDispatcher.Operations opCode) { + return operations[opCode.ordinal()]; + } + + public StageStatistics getStatistics() { + return statistics; + } + + public DIRClient getDIRClient() { + return dirClient; + } + + public void startupPerformed() { + + } + + public void shutdownPerformed() { + + } + + public void crashPerformed() { + Logging.logMessage(Logging.LEVEL_ERROR, this, + "a component crashed... shutting down system!"); + this.shutdown(); + } + + public void sendUDP(ReusableBuffer data, InetSocketAddress receiver) { + udpCom.send(data, receiver); + } + + public void receiveUDP(ReusableBuffer data, InetSocketAddress sender) { + data.position(0); + + int type = (int) data.get(); + + if (type == UDPMessageType.Striping.ordinal()) { + // globalmax info for the storage stage + final Operation gMaxEvent = getOperation(RequestDispatcher.Operations.GMAX); + OSDRequest rq = new OSDRequest(-1); + rq.setData(data, HTTPUtils.DATA_TYPE.BINARY); + rq.setOperation(gMaxEvent); + gMaxEvent.startRequest(rq); + + } else if (type == UDPMessageType.MPXLN.ordinal()) { + // ignore for now! + } else { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "unknown UDP message type!"); + } + } + + /** + * Checks if the local OSD is the head OSD in one of the given X-Locations + * list. 
+ * + * @param xloc + * the X-Locations list + * @return true, if the local OSD is the head OSD of the + * given X-Locations list; false, otherwise + */ + public boolean isHeadOSD(Location xloc) { + final ServiceUUID headOSD = xloc.getOSDs().get(0); + return config.getUUID().equals(headOSD); + } + + public long getFreeSpace() { + return FSUtils.getFreeSpace(config.getObjDir()); + } + + public long getTotalSpace() { + File f = new File(config.getObjDir()); + long s = f.getTotalSpace(); + return s; + } + +} diff --git a/servers/src/org/xtreemfs/osd/OpenFileTable.java b/servers/src/org/xtreemfs/osd/OpenFileTable.java new file mode 100644 index 0000000000000000000000000000000000000000..0f413dfc74401a597c047c1c8ed3152e6cc0b115 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/OpenFileTable.java @@ -0,0 +1,311 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion + and Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC) + * Eugenio Cesario (CNR) + */ + +package org.xtreemfs.osd; + +import org.xtreemfs.common.ClientLease; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.PriorityQueue; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.osd.storage.CowPolicy; + +/** + * This class models an OpenFileTable, storing the set of files in an 'open' + * state; it makes available a 'clean' method that cleans the table by deleting + * entries whose expiration time is expired + * + * @author Eugenio Cesario + */ +public final class OpenFileTable { + + private HashMap openFiles; + + private PriorityQueue expTimes; + + // constructor + public OpenFileTable() { + openFiles = new HashMap(); + expTimes = new PriorityQueue(); + } + + /** + * Insert a new entry in the table + * + * @param fId + * fileId + * @param expTime + * expiration time + */ + public CowPolicy refresh(String fId, long expTime) { + OpenFileTableEntry currEntry = openFiles.get(fId); + + if (currEntry != null) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "refreshing for " + + fId); + // 'currEntry' isn't a new entry, so update it + // if its expiration time is renewed + if (expTime > currEntry.expTime) { + // openFiles.remove(fId); + expTimes.remove(currEntry); + currEntry.setExpirationTime(expTime); + openFiles.put(fId, currEntry); + expTimes.add(currEntry); + } + return currEntry.getCowPolicy(); + } else { + assert(false):"should never get here!"; + Logging.logMessage(Logging.LEVEL_ERROR, this,"ARGH!!!! 
SHOULD NOT REFRESH FOR NON-OPEN FILE ANYMORE!!!!!"); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "new entry for " + + fId); + // 'currEntry' is a new entry, so + // insert it in the table + OpenFileTableEntry newEntry = new OpenFileTableEntry(fId, expTime); + openFiles.put(fId, newEntry); + expTimes.add(newEntry); + return null; + } + } + + /** + * Insert a new entry in the table + * + * @param fId + * fileId + * @param expTime + * expiration time + */ + public void openFile(String fId, long expTime, CowPolicy policy) { + assert(openFiles.containsKey(fId) == false); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "new entry for " + + fId); + // 'currEntry' is a new entry, so + // insert it in the table + OpenFileTableEntry newEntry = new OpenFileTableEntry(fId, expTime, policy); + openFiles.put(fId, newEntry); + expTimes.add(newEntry); + } + + /** + * Returns 'true' if this table contains the specified file, 'false' + * otherwise + */ + public boolean contains(String fId) { + return openFiles.containsKey(fId); + } + + /** + * Delete all the entries whose expiration time is strictly less than + * 'toTime'. + */ + public List clean(long toTime) { + + LinkedList closedFiles = new LinkedList(); + + OpenFileTableEntry dummyEntry = new OpenFileTableEntry(null, toTime); + + Iterator it = expTimes.iterator(); + + // since entries in 'expTimes' are sorted w.r.t. 
their expiration time + // (ascending order), 'expTimes' has to be scanned until there is an + // entry + // with its 'expTimes' > 'toTime' + + while (it.hasNext()) { + + OpenFileTableEntry currEntry = (OpenFileTableEntry) it.next(); + + if (currEntry.compareTo(dummyEntry) < 0) { + String fId = currEntry.fileId; + openFiles.remove(fId); + it.remove(); + closedFiles.add(currEntry); + } else { + break; + } + } + return closedFiles; + } + + /** + * It tells if a file was closed + * + * @param fileId + * File to consult + * @return true if the file is closed + */ + public boolean isClosed(String fileId) { + OpenFileTableEntry fileEntry = openFiles.get(fileId); + + if (fileEntry != null) { + if (fileEntry.expTime < (System.currentTimeMillis() / 1000)) { + return true; + } else { + return false; + } + } else { + return true; + } + } + + public void setDeleteOnClose(String fileId) { + OpenFileTableEntry fileEntry = openFiles.get(fileId); + + if (fileEntry != null) { + fileEntry.setDeleteOnClose(); + } + } + + /** + * + * @param fileId + * @return true if the file with the given id is set to be deleted on close, false otherwise. 
+ */ + public boolean isDeleteOnClose(String fileId) { + OpenFileTableEntry fileEntry = openFiles.get(fileId); + + if (fileEntry != null) { + return fileEntry.isDeleteOnClose(); + } + + return false; + } + + public void close(String fileId) { + OpenFileTableEntry currEntry = openFiles.get(fileId); + + if (currEntry != null) { + expTimes.remove(currEntry); + openFiles.remove(fileId); + } + } + + public int getNumOpenFiles() { + return this.openFiles.size(); + } + + public List getLeases(String fileId) { + OpenFileTableEntry e = openFiles.get(fileId); + assert(e != null); + return e.getClientLeases(); + } + + /** + * Class used to model an entry in the OpenFileTable + * + * @author Eugenio Cesario + * + */ + public static class OpenFileTableEntry implements Comparable { + + private final String fileId; + + private long expTime; + + private boolean deleteOnClose; + + private List clientLeases; + + private CowPolicy fileCowPolicy; + + public OpenFileTableEntry(String fid, long et) { + this(fid,et,null); + } + + public OpenFileTableEntry(String fid, long et, CowPolicy cow) { + fileId = fid; + expTime = et; + deleteOnClose = false; + clientLeases = new LinkedList(); + + if (cow != null) + fileCowPolicy = cow; + else + fileCowPolicy = new CowPolicy(CowPolicy.cowMode.NO_COW); + } + + public int compareTo(Object o) { + int res = 0; + final OpenFileTableEntry e = (OpenFileTableEntry) o; + if (this.expTime < e.expTime) { + res = -1; + } else if (this.expTime == e.expTime) { + res = 0; + } else { + res = 1; + } + return res; + } + + public boolean equals(Object o) { + try { + final OpenFileTableEntry e = (OpenFileTableEntry) o; + if (fileId.equals(e.fileId)) { + return true; + } else { + return false; + } + } catch (ClassCastException ex) { + return false; + } + } + + public String toString() { + return "(" + fileId + "," + expTime + ")"; + } + + public void setExpirationTime(long newExpTime) { + this.expTime = newExpTime; + } + + public void setDeleteOnClose() { + 
deleteOnClose = true; + } + + public boolean isDeleteOnClose() { + return deleteOnClose; + } + + public String getFileId() { + return this.fileId; + } + + public List getClientLeases() { + return this.clientLeases; + } + + public CowPolicy getCowPolicy() { + return this.fileCowPolicy; + } + + + } +} diff --git a/servers/src/org/xtreemfs/osd/RPCTokens.java b/servers/src/org/xtreemfs/osd/RPCTokens.java new file mode 100644 index 0000000000000000000000000000000000000000..417c4f581d289bdf5398ef39e6b1a7049cde4929 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/RPCTokens.java @@ -0,0 +1,81 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion + and Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
 */
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
 *          Eugenio Cesario (CNR)
 */

package org.xtreemfs.osd;

/**
 * String constants naming the POST methods (RPC tokens) understood by the OSD.
 * NOTE(review): naming is inconsistent ("...Token" vs. "...TOKEN") — kept as-is
 * because these are public constants referenced elsewhere.
 */
public interface RPCTokens {

    /**
     * POST method name for getting the size of a file
     */
    public static final String fetchGlobalMaxToken = "fetchGlobalMax";

    /**
     * POST method name for truncating a file with propagation
     */
    public static final String truncateTOKEN = "truncate";

    /**
     * POST method name for truncating a file without propagation
     */
    public static final String truncateLocalTOKEN = "truncateLocal";

    /**
     * POST method name for the deletion of locally stored objects of a file
     */
    public static final String deleteLocalTOKEN = "deleteLocal";

    /**
     * POST method name for the retrieval of a commonly supported protocol
     * version
     */
    public static final String getProtocolVersionTOKEN = "getProtocolVersion";

    /**
     * POST method for shutting down the OSD
     */
    public static final String shutdownTOKEN = ".shutdown";

    /** POST method for retrieving OSD statistics */
    public static final String getstatsTOKEN = "getStatistics";

    /** POST method for recording request durations — presumably toggles/collects
     *  per-request timing stats; confirm against the stats stage */
    public static final String recordRqDurationTOKEN = "recordRqDuration";

    /**
     * POST method for checking an object stored on the OSD
     */
    public static final String checkObjectTOKEN = "checkObject";

    /** POST method for acquiring a client lease */
    public static final String acquireLeaseTOKEN = "acquireLease";

    /** POST method for returning (releasing) a client lease */
    public static final String returnLeaseTOKEN = "returnLease";

    /** POST method for triggering a clean-up run */
    public static final String cleanUpTOKEN = "cleanUp";

    // public static final String bufferstatsTOKEN = "/.sys.bufferstats";

    // public static final String queuestatsTOKEN = "/.sys.queuestats";
}
diff --git a/servers/src/org/xtreemfs/osd/RequestDetails.java b/servers/src/org/xtreemfs/osd/RequestDetails.java new file mode 100644 index 0000000000000000000000000000000000000000..188ccad9911e516dc73219769d7461c90276cc8d --- /dev/null +++ b/servers/src/org/xtreemfs/osd/RequestDetails.java @@ -0,0 +1,219 @@
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer
Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.osd; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.ClientLease; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.osd.replication.TransferStrategy; +import org.xtreemfs.osd.storage.CowPolicy; + +public final class RequestDetails { + + protected String fileId; + + protected long objectNumber; + + protected boolean rangeRequested; + + protected long byteRangeStart; + + protected long byteRangeEnd; + + protected Capability capability; + + protected Locations locationList; + + protected Location currentReplica; + + protected boolean objectVersionNumberRequested; + + protected int objectVersionNumber; + + private long truncateFileSize; + + private String newFSandEpoch; + + private boolean checkOnly; + + private boolean invalidChecksum; + + private ClientLease lease; + + private TransferStrategy replicationTransfer; + + private String requestId; + + private CowPolicy cowPolicy; + + public String getFileId() { + return fileId; 
+ } + + public void setFileId(String fileId) { + this.fileId = fileId; + } + + public long getObjectNumber() { + return objectNumber; + } + + public void setObjectNumber(long objectNumber) { + this.objectNumber = objectNumber; + } + + public boolean isRangeRequested() { + return rangeRequested; + } + + public void setRangeRequested(boolean rangeRequested) { + this.rangeRequested = rangeRequested; + } + + public long getByteRangeStart() { + return byteRangeStart; + } + + public void setByteRangeStart(long byteRangeStart) { + this.byteRangeStart = byteRangeStart; + } + + public long getByteRangeEnd() { + return byteRangeEnd; + } + + public void setByteRangeEnd(long byteRangeEnd) { + this.byteRangeEnd = byteRangeEnd; + } + + public Capability getCapability() { + return capability; + } + + public void setCapability(Capability capability) { + this.capability = capability; + } + + public Locations getLocationList() { + return locationList; + } + + public void setLocationList(Locations locationList) { + this.locationList = locationList; + } + + public Location getCurrentReplica() { + return currentReplica; + } + + public void setCurrentReplica(Location currentReplica) { + this.currentReplica = currentReplica; + } + + public long getTruncateFileSize() { + return truncateFileSize; + } + + public void setTruncateFileSize(long fileSize) { + this.truncateFileSize = fileSize; + } + + public boolean isObjectVersionNumberRequested() { + return objectVersionNumberRequested; + } + + public void setObjectVersionNumberRequested( + boolean objectVersionNumberRequested) { + this.objectVersionNumberRequested = objectVersionNumberRequested; + } + + public int getObjectVersionNumber() { + return objectVersionNumber; + } + + public void setObjectVersionNumber(int objectVersionNumber) { + this.objectVersionNumber = objectVersionNumber; + } + + public String getNewFSandEpoch() { + return newFSandEpoch; + } + + public void setNewFSandEpoch(String newFSandEpoch) { + this.newFSandEpoch = 
newFSandEpoch; + } + + public boolean isCheckOnly() { + return checkOnly; + } + + public void setCheckOnly(boolean checkOnly) { + this.checkOnly = checkOnly; + } + + public boolean isInvalidChecksum() { + return invalidChecksum; + } + + public void setInvalidChecksum(boolean invalidChecksum) { + this.invalidChecksum = invalidChecksum; + } + + public ClientLease getLease() { + return lease; + } + + public void setLease(ClientLease lease) { + this.lease = lease; + } + + public TransferStrategy getReplicationTransfer() { + return this.replicationTransfer; + } + + public void setReplicationTransfer(TransferStrategy replicationTransfer) { + if (this != null) + this.replicationTransfer = replicationTransfer; + // TODO: throw exception for additional set + } + + public String getRequestId() { + return requestId; + } + + public void setRequestId(String requestId) { + this.requestId = requestId; + } + + public CowPolicy getCowPolicy() { + return cowPolicy; + } + + public void setCowPolicy(CowPolicy cowPolicy) { + this.cowPolicy = cowPolicy; + } + +} diff --git a/servers/src/org/xtreemfs/osd/RequestDispatcher.java b/servers/src/org/xtreemfs/osd/RequestDispatcher.java new file mode 100644 index 0000000000000000000000000000000000000000..5f2af5bb36fc0dc517b5257d8810eff0fa5ee99e --- /dev/null +++ b/servers/src/org/xtreemfs/osd/RequestDispatcher.java @@ -0,0 +1,72 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 

 XtreemFS is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
 along with XtreemFS. If not, see .
 */
/*
 * AUTHORS: Jan Stender (ZIB), Felix Langner (ZIB)
 */

package org.xtreemfs.osd;

import java.io.IOException;
import java.net.InetSocketAddress;

import org.xtreemfs.common.Request;
import org.xtreemfs.common.buffer.ReusableBuffer;
import org.xtreemfs.common.clients.dir.DIRClient;
import org.xtreemfs.common.striping.Location;
import org.xtreemfs.foundation.speedy.SpeedyRequest;
import org.xtreemfs.osd.ops.Operation;
import org.xtreemfs.osd.stages.Stage;
import org.xtreemfs.osd.stages.StageStatistics;

/**
 * Central access point of the OSD: gives operations and stages access to each
 * other, to the configuration, and to the communication channels (Speedy, UDP).
 */
public interface RequestDispatcher {

    /** Identifiers of all operations the OSD supports; implementations index
     *  an Operation array by {@code ordinal()}. */
    public static enum Operations {
        READ, WRITE, STATUS_PAGE, FETCH_GMAX, TRUNCATE, TRUNCATE_LOCAL, DELETE,
        OFT_DELETE, DELETE_LOCAL, GET_PROTOCOL_VERSION, SHUTDOWN, CHECK_OBJECT,
        GMAX, CLOSE_FILE, GET_STATS, STATS_CONFIG, ACQUIRE_LEASE, RETURN_LEASE,
        CLEAN_UP, FETCH_AND_WRITE_REPLICA
    }

    /** Identifiers of the processing stages a request may pass through. */
    public static enum Stages {
        PARSER, AUTH, STORAGE, DELETION, STATS, REPLICATION
    }

    /** Returns the operation registered for the given opcode. */
    public Operation getOperation(Operations opCode);

    /** Returns the stage registered for the given stage identifier. */
    public Stage getStage(Stages stage);

    /** Returns the OSD's configuration. */
    public OSDConfig getConfig();

    /** Returns the per-stage statistics collector. */
    public StageStatistics getStatistics();

    /**
     * Checks if the local OSD is the head OSD in the given X-Locations list.
     *
     * @param xloc
     *            the X-Locations list
     * @return true if the local OSD is the head OSD
     */
    public boolean isHeadOSD(Location xloc);

    /** Sends a Speedy request to the given server on behalf of originalRequest. */
    public void sendSpeedyRequest(Request originalRequest,
        SpeedyRequest speedyRq, InetSocketAddress server) throws IOException;

    /** Sends a UDP datagram to the given receiver. */
    public void sendUDP(ReusableBuffer data, InetSocketAddress receiver);

    /** Completes a request: builds and sends the HTTP response to the client. */
    public void requestFinished(OSDRequest rq);

    /** Shuts down all components of the OSD. */
    public void shutdown();

    /** Returns the client for the Directory Service. */
    public DIRClient getDIRClient();
}
diff --git a/servers/src/org/xtreemfs/osd/UDPCommunicator.java b/servers/src/org/xtreemfs/osd/UDPCommunicator.java new file mode 100644 index
0000000000000000000000000000000000000000..1cdc020ef9372bab575f917a965b283810161a89 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/UDPCommunicator.java @@ -0,0 +1,225 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.osd; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.channels.ClosedByInterruptException; +import java.nio.channels.DatagramChannel; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.util.Iterator; +import java.util.Set; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.LifeCycleThread; + +/** + * + * @author bjko + */ +public class UDPCommunicator extends LifeCycleThread { + + public final int port; + + private DatagramChannel channel; + + private Selector selector; + + private volatile boolean quit; + + private final AtomicBoolean sendMode; + + private final LinkedBlockingQueue q; + + private final UDPReceiverInterface receiver; + + public static final int MAX_UDP_SIZE = 1024; + + public UDPCommunicator(int port, UDPReceiverInterface receiver) { + super("UDPComStage"); + this.port = port; + q = new LinkedBlockingQueue(); + sendMode = new AtomicBoolean(false); + this.receiver = receiver; + } + + /** + * sends a UDPRequest. + * @attention Overwrites the first byte of rq.data with the message type. 
+ */ + public void send(ReusableBuffer data, InetSocketAddress receiver) { + UDPRequest rq = new UDPRequest(); + rq.address = receiver; + rq.data = data; + data.position(0); + q.add(rq); + + if (q.size() == 1) { + //System.out.println("wakeup!"); + selector.wakeup(); + } + } + + public void shutdown() { + quit = true; + interrupt(); + } + + @Override + public void run() { + + try { + + selector = Selector.open(); + + channel = DatagramChannel.open(); + channel.socket().bind(new InetSocketAddress(port)); + channel.configureBlocking(false); + channel.register(selector, SelectionKey.OP_READ); + + Logging.logMessage(Logging.LEVEL_INFO,this,"UDP socket on port "+port+" ready"); + + notifyStarted(); + + boolean isRdOnly = true; + + while (!quit) { + + if (q.size() == 0) { + if (!isRdOnly) { + channel.keyFor(selector).interestOps(SelectionKey.OP_READ); + //System.out.println("read only"); + isRdOnly = true; + } + } else { + if (isRdOnly) { + channel.keyFor(selector).interestOps(SelectionKey.OP_READ | SelectionKey.OP_WRITE); + //System.out.println("read write"); + isRdOnly = false; + } + } + + int numKeys = selector.select(); + + if (q.size() == 0) { + if (!isRdOnly) { + channel.keyFor(selector).interestOps(SelectionKey.OP_READ); + //System.out.println("read only"); + isRdOnly = true; + } + } else { + if (isRdOnly) { + channel.keyFor(selector).interestOps(SelectionKey.OP_READ | SelectionKey.OP_WRITE); + //System.out.println("read write"); + isRdOnly = false; + } + } + + if (numKeys == 0) + continue; + + if (q.size() > 10000) { + System.out.println("QS!!!!! 
"+q.size()); + System.out.println("is readOnly: "+isRdOnly); + } + + // fetch events + Set keys = selector.selectedKeys(); + Iterator iter = keys.iterator(); + + // process all events + while(iter.hasNext()) { + + SelectionKey key = iter.next(); + + // remove key from the list + iter.remove(); + + if (key.isReadable()) { + InetSocketAddress sender = null; + //do { + ReusableBuffer data = BufferPool.allocate(MAX_UDP_SIZE); + sender = (InetSocketAddress) channel.receive(data.getBuffer()); + if (sender == null) { + BufferPool.free(data); + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_WARN,this,"read key for empty read"); + } else { + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG,this,"read data from "+sender); + + receiver.receiveUDP(data,sender); + } + //} while (sender != null); + } else if (key.isWritable()) { + UDPRequest r = q.poll(); + while (r != null) { + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_WARN,this,"sent packet to "+r.address); + int sent = channel.send(r.data.getBuffer(),r.address); + BufferPool.free(r.data); + if (sent == 0) { + //System.out.println("cannot send anymore!"); + q.put(r); + break; + } + r = q.poll(); + } + } else { + throw new RuntimeException("strange key state: "+key); + } + } + + } + + selector.close(); + channel.close(); + + } catch(ClosedByInterruptException ex) { + // ignore + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR,this,ex); + } catch (Throwable th) { + notifyCrashed(th instanceof Exception ? 
(Exception) th + : new Exception(th)); + return; + } + + notifyStopped(); + } + + private static final class UDPRequest { + public InetSocketAddress address; + public ReusableBuffer data; + } + + +} diff --git a/servers/src/org/xtreemfs/osd/UDPMessageType.java b/servers/src/org/xtreemfs/osd/UDPMessageType.java new file mode 100644 index 0000000000000000000000000000000000000000..dbbdd16e157b0bc441622cee11d9fcffd38c83d9 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/UDPMessageType.java @@ -0,0 +1,32 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion + and Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
*/
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
 *          Eugenio Cesario (CNR)
 */

package org.xtreemfs.osd;

/**
 * Message type tag carried in the first byte of an OSD UDP datagram; the
 * dispatcher's receiveUDP routes Striping messages to the GMAX operation and
 * ignores MPXLN messages for now.
 */
public enum UDPMessageType {
    Striping, MPXLN
}
diff --git a/servers/src/org/xtreemfs/osd/UDPReceiverInterface.java b/servers/src/org/xtreemfs/osd/UDPReceiverInterface.java new file mode 100644 index 0000000000000000000000000000000000000000..947b82be638ba748daddb4ca8d3847e2b1827f62 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/UDPReceiverInterface.java @@ -0,0 +1,46 @@
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.

 This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
 Grid Operating System, see for more details.
 The XtreemOS project has been developed with the financial support of the
 European Commission's IST program under contract #FP6-033576.

 XtreemFS is free software: you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation, either version 2 of the License, or (at your option)
 any later version.

 XtreemFS is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
 along with XtreemFS. If not, see .
 */
/*
 * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB)
 */

package org.xtreemfs.osd;

import java.net.InetSocketAddress;

import org.xtreemfs.common.buffer.ReusableBuffer;

/**
 * An interface for UDP communication.
 */
public interface UDPReceiverInterface {

    /**
     * This method is invoked when a UDP packet is received.
     *
     * @param data
     *            a buffer containing the data sent with the UDP request
     * @param sender
     *            the sender's socket address
     */
    public void receiveUDP(ReusableBuffer data, InetSocketAddress sender);

}
diff --git a/servers/src/org/xtreemfs/osd/ops/AcquireLease.java b/servers/src/org/xtreemfs/osd/ops/AcquireLease.java new file mode 100644 index 0000000000000000000000000000000000000000..23fca9bee030436a902b876c35f4896aee4d2e9c --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/AcquireLease.java @@ -0,0 +1,75 @@
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */

package org.xtreemfs.osd.ops;

import java.util.List;
import org.xtreemfs.common.ClientLease;
import org.xtreemfs.osd.ErrorCodes;
import org.xtreemfs.osd.ErrorRecord;
import org.xtreemfs.osd.ErrorRecord.ErrorClass;
import org.xtreemfs.osd.OSDRequestDispatcher;
import org.xtreemfs.osd.OSDRequest;
import org.xtreemfs.osd.RequestDispatcher.Stages;
import org.xtreemfs.osd.stages.AuthenticationStage;
import org.xtreemfs.osd.stages.Stage.StageResponseCode;
import org.xtreemfs.osd.stages.StageCallbackInterface;

/**
 * Operation that acquires a client lease for a file: the request is sent
 * through the authentication stage twice — first to authenticate and open the
 * file, then to actually acquire the lease.
 *
 * @author bjko
 */
public class AcquireLease extends Operation {

    public AcquireLease(OSDRequestDispatcher master) {
        super(master);
    }

    /**
     * Entry point: authenticate and open the file in the AUTH stage.
     * NOTE(review): the op codes are OR-combined into a single int —
     * presumably the stage decodes them as a bitmask; confirm against
     * AuthenticationStage.
     */
    public void startRequest(OSDRequest rq) {
        master.getStage(Stages.AUTH).enqueueOperation(rq, AuthenticationStage.STAGEOP_AUTHENTICATE | AuthenticationStage.STAGEOP_OFT_OPEN | AuthenticationStage.STAGEOP_ACQUIRE_LEASE, new StageCallbackInterface() {

            public void methodExecutionCompleted(OSDRequest request, StageResponseCode result) {
                // NOTE(review): 'result' is not checked here, unlike in
                // CheckObjectRPC — verify whether failures are handled upstream
                postAuthenticate(request);
            }
        });
    }

    /** Second step: acquire the lease in the AUTH stage, then answer the client. */
    public void postAuthenticate(OSDRequest rq) {

        master.getStage(Stages.AUTH).enqueueOperation(rq, AuthenticationStage.STAGEOP_ACQUIRE_LEASE, new StageCallbackInterface() {

            public void methodExecutionCompleted(OSDRequest request, StageResponseCode result) {
                master.requestFinished(request);
            }
        });

    }

    /**
     * Parses and inspects the JSON RPC arguments.
     *
     * @param rq
     *            the request
     * @param arguments
     *            the JSON RPC arguments
     * @return null if successful, error message otherwise
     */
    @Override
    public ErrorRecord parseRPCBody(OSDRequest rq, List arguments) {
        try {
            final ClientLease l = ClientLease.parseFromList(arguments);
            rq.getDetails().setLease(l);

            return null;
        } catch (Exception ex) {
            return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_RPC,
                "body is not well-formatted or does not contain valid arguments", ex);
        }
    }

}
diff --git a/servers/src/org/xtreemfs/osd/ops/CheckObjectRPC.java b/servers/src/org/xtreemfs/osd/ops/CheckObjectRPC.java new file mode 100644 index 0000000000000000000000000000000000000000..70073b17569ca0730369894ef57a1211119e5e7b --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/CheckObjectRPC.java @@ -0,0 +1,83 @@
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.

 This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
 Grid Operating System, see for more details.
 The XtreemOS project has been developed with the financial support of the
 European Commission's IST program under contract #FP6-033576.

 XtreemFS is free software: you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
 Software Foundation, either version 2 of the License, or (at your option)
 any later version.

 XtreemFS is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
 along with XtreemFS. If not, see .
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.ops; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.AuthenticationStage; +import org.xtreemfs.osd.stages.StageCallbackInterface; +import org.xtreemfs.osd.stages.StorageThread; +import org.xtreemfs.osd.stages.Stage.StageResponseCode; + +public final class CheckObjectRPC extends Operation { + + public CheckObjectRPC(RequestDispatcher master) { + super(master); + } + + public void startRequest(OSDRequest rq) { + + master.getStage(Stages.AUTH).enqueueOperation( + rq, + AuthenticationStage.STAGEOP_AUTHENTICATE + | AuthenticationStage.STAGEOP_OFT_OPEN, + new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, + StageResponseCode result) { + postAuthenticate(request, result); + } + }); + } + + protected void postAuthenticate(OSDRequest rq, StageResponseCode result) { + + if (result == StageResponseCode.OK) { + + master.getStage(Stages.STORAGE).enqueueOperation(rq, + StorageThread.STAGEOP_READ_OBJECT, + new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, + StageResponseCode result) { + postCheck(request, result); + } + }); + + } else { + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "authentication failed for " + rq.getRequestId()); + master.requestFinished(rq); + } + } + + private void postCheck(OSDRequest request, StageResponseCode result) { + master.requestFinished(request); + } + +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/osd/ops/CleanUpOperation.java b/servers/src/org/xtreemfs/osd/ops/CleanUpOperation.java new file mode 100644 index 0000000000000000000000000000000000000000..29afb1791cc311778ad398192ad94b2d70bdc06c --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/CleanUpOperation.java 
@@ -0,0 +1,100 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHOR: Felix Langner (ZIB) + */ +package org.xtreemfs.osd.ops; + +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.AuthenticationStage; +import org.xtreemfs.osd.stages.StageCallbackInterface; +import org.xtreemfs.osd.stages.StorageThread; +import org.xtreemfs.osd.stages.Stage.StageResponseCode; +import org.xtreemfs.common.logging.Logging; + +/** + * @author langner + * + */ +public final class CleanUpOperation extends Operation { + + public CleanUpOperation(RequestDispatcher master) { + super(master); + } + + /* (non-Javadoc) + * @see org.xtreemfs.osd.ops.Operation#startRequest(org.xtreemfs.osd.Request) + */ + @Override + public void startRequest(OSDRequest rq) { + + // use anonymous inner classes implementation + master.getStage(Stages.AUTH).enqueueOperation(rq, + AuthenticationStage.STAGEOP_AUTHENTICATE, + new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, + 
StageResponseCode result) { + postAuthenticate(request, result); + } + }); + } + + /** + * called after the authentication stage has processed the request + * + * @param rq + * the request + * @param result + * authentication stage result + */ + protected void postAuthenticate(OSDRequest rq, StageResponseCode result) { + if (result == StageResponseCode.OK) { + + master.getStage(Stages.STORAGE).enqueueOperation(rq, + StorageThread.STAGEOP_CLEAN_UP, + new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, + StageResponseCode result) { + postVerify(request, result); + } + }); + + } else { + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "authentication failed for " + rq.getRequestId()); + master.requestFinished(rq); + } + } + + /** + * finishes the request + * @param request + * @param result + */ + private void postVerify(OSDRequest request, StageResponseCode result) { + master.requestFinished(request); + } +} diff --git a/servers/src/org/xtreemfs/osd/ops/CloseFileEvent.java b/servers/src/org/xtreemfs/osd/ops/CloseFileEvent.java new file mode 100644 index 0000000000000000000000000000000000000000..91bd08accc73b8016a293c40fb1d6f0516351fdf --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/CloseFileEvent.java @@ -0,0 +1,53 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.ops; + +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.DeletionStage; +import org.xtreemfs.osd.stages.ParserStage; + +public class CloseFileEvent extends Operation { + + public CloseFileEvent(RequestDispatcher master) { + super(master); + } + + @Override + public void startRequest(OSDRequest rq) { + + master.getStage(Stages.PARSER).enqueueOperation(rq, + ParserStage.STAGEOP_REMOVE_CACHE_ENTRY, null); + + // if "delete on close" is set, delete all objects + if ((Boolean) rq.getAttachment()) { + rq.setAttachment(false); // mark for an immediate deletion + master.getStage(Stages.DELETION).enqueueOperation(rq, + DeletionStage.STAGEOP_DELETE_OBJECTS, null); + } + } + +} diff --git a/servers/src/org/xtreemfs/osd/ops/DeleteLocalRPC.java b/servers/src/org/xtreemfs/osd/ops/DeleteLocalRPC.java new file mode 100644 index 0000000000000000000000000000000000000000..b68004d5a3b11d806d5ee4f48e38d1ab63f98b8c --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/DeleteLocalRPC.java @@ -0,0 +1,52 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.ops; + +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.DeletionStage; +import org.xtreemfs.osd.stages.StageCallbackInterface; +import org.xtreemfs.osd.stages.Stage.StageResponseCode; + +public final class DeleteLocalRPC extends Operation { + + public DeleteLocalRPC(RequestDispatcher master) { + super(master); + } + + public void startRequest(OSDRequest rq) { + + master.getStage(Stages.DELETION).enqueueOperation(rq, + DeletionStage.STAGEOP_CHECK_OPEN_STATE, + new StageCallbackInterface() { + public void methodExecutionCompleted(OSDRequest request, + StageResponseCode result) { + master.requestFinished(request); + } + }); + } + +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/osd/ops/DeleteOFTRPC.java b/servers/src/org/xtreemfs/osd/ops/DeleteOFTRPC.java new file mode 100644 index 0000000000000000000000000000000000000000..80150a234226e4a9912548bb33a680864b76beae --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/DeleteOFTRPC.java @@ -0,0 +1,73 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. 
+ The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.ops; + +import org.xtreemfs.common.Request; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.AuthenticationStage; +import org.xtreemfs.osd.stages.DeletionStage; +import org.xtreemfs.osd.stages.StageCallbackInterface; +import org.xtreemfs.osd.stages.Stage.StageResponseCode; + +public final class DeleteOFTRPC extends Operation { + + public DeleteOFTRPC(RequestDispatcher master) { + super(master); + } + + /** + * Start the operation for a request + * + * @param rq + * the request + */ + @Override + public void startRequest(OSDRequest rq) { + + // use anonymous inner classes implementation + master.getStage(Stages.AUTH).enqueueOperation(rq, + AuthenticationStage.STAGEOP_OFT_DELETE, + new StageCallbackInterface() { + public void methodExecutionCompleted(OSDRequest request, + StageResponseCode result) { + postDelete(request, result); + } + }); + } + + private void postDelete(Request rq, StageResponseCode result) { + master.getStage(Stages.DELETION).enqueueOperation( + (OSDRequest) rq.getAttachment(), DeletionStage.STAGEOP_DELETE_OBJECTS, + new StageCallbackInterface() { + public
void methodExecutionCompleted(OSDRequest request, + StageResponseCode result) { + master.requestFinished(request); + } + }); + } + +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/osd/ops/DeleteOperation.java b/servers/src/org/xtreemfs/osd/ops/DeleteOperation.java new file mode 100644 index 0000000000000000000000000000000000000000..fa6307ffd31e1fdbbdc3fec709d5176a5d2c3729 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/DeleteOperation.java @@ -0,0 +1,93 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.ops; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.AuthenticationStage; +import org.xtreemfs.osd.stages.DeletionStage; +import org.xtreemfs.osd.stages.StageCallbackInterface; +import org.xtreemfs.osd.stages.Stage.StageResponseCode; + +public final class DeleteOperation extends Operation { + + public DeleteOperation(RequestDispatcher master) { + super(master); + } + + @Override + public void startRequest(OSDRequest rq) { + + // use anoymous inner classes impl + master.getStage(Stages.AUTH).enqueueOperation(rq, + AuthenticationStage.STAGEOP_AUTHENTICATE, + new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, + StageResponseCode result) { + postAuthenticate(request, result); + } + }); + } + + /** + * called after the authentication stage has processed the request + * + * @param rq + * the request + * @param result + * authentication stage result + * @param error + * error details or null + */ + protected void postAuthenticate(OSDRequest rq, StageResponseCode result) { + + if (result == StageResponseCode.OK) { + + master.getStage(Stages.DELETION).enqueueOperation(rq, + DeletionStage.STAGEOP_DELETE_FILE, + new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, + StageResponseCode result) { + postDelete(request, result); + } + }); + + } else { + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "authentication failed for " + rq.getRequestId()); + master.requestFinished(rq); + } + } + + private void postDelete(OSDRequest request, StageResponseCode result) { + master.requestFinished(request); + } + +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/osd/ops/FetchAndWriteReplica.java 
b/servers/src/org/xtreemfs/osd/ops/FetchAndWriteReplica.java new file mode 100644 index 0000000000000000000000000000000000000000..b7bc46469283226bf577a28220196ff49bcad222 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/FetchAndWriteReplica.java @@ -0,0 +1,156 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.osd.ops; + +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.ReplicationStage; +import org.xtreemfs.osd.stages.StageCallbackInterface; +import org.xtreemfs.osd.stages.StorageThread; +import org.xtreemfs.osd.stages.Stage.StageResponseCode; + +/** + * This operation is only for OSD internal processing. It generates no response. 
+ * 25.09.2008 + * + * @author clorenz + */ +public class FetchAndWriteReplica extends Operation { + /** + * @param master + */ + public FetchAndWriteReplica(RequestDispatcher master) { + super(master); + } + + /* + * (non-Javadoc) + * + * @see org.xtreemfs.osd.ops.Operation#startRequest(org.xtreemfs.osd.Request) + */ + @Override + public void startRequest(OSDRequest rq) { + // FIXME: testcode + String str = rq.getRequestId() + + ": start WriteReplica request (fetch object)"; + if (rq.getOriginalOsdRequest() != null) + str += " with original request " + + rq.getOriginalOsdRequest().getRequestId(); + System.out.println(str); + master.getStage(Stages.REPLICATION).enqueueOperation(rq, + ReplicationStage.STAGEOP_INTERNAL_SEND_FETCH_OBJECT_REQUEST, + new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, + StageResponseCode result) { + postFetchObject(request, result); + } + }); + } + + private void postFetchObject(OSDRequest rq, StageResponseCode result) { + if (result == StageResponseCode.OK) { + System.out.println(rq.getRequestId() + + ": write object to StorageStage"); + + // continue with original request + OSDRequest originalRq = rq.getOriginalOsdRequest(); + if (originalRq != null) { + System.out.println(rq.getRequestId() + + ": copy data to original request " + + originalRq.getRequestId()); + // copy fetched data to original request + originalRq.setData(BufferPool.allocate(rq.getData().capacity()) + .put(rq.getData()), rq.getDataType()); + // go on with the original request operation-callback + originalRq.getCurrentCallback().methodExecutionCompleted( + originalRq, result); + rq.setOriginalOsdRequest(null); + } + + master.getStage(Stages.STORAGE).enqueueOperation(rq, + StorageThread.STAGEOP_WRITE_OBJECT, + new StageCallbackInterface() { + + public void methodExecutionCompleted( + OSDRequest request, StageResponseCode result) { + postWrite(request, result); + } + }); + } else { + if (Logging.isDebug()) + Logging + 
.logMessage(Logging.LEVEL_DEBUG, this, + "fetching object " + + rq.getDetails().getFileId() + ":" + + rq.getDetails().getObjectNumber() + + " failed"); + master.requestFinished(rq); + } + } + + private void postWrite(OSDRequest rq, StageResponseCode result) { + if (result == StageResponseCode.OK) { + System.out.println(rq.getRequestId() + ": end WriteReplica"); + + // initiate next steps for replication + master + .getStage(Stages.REPLICATION) + .enqueueOperation( + rq, + ReplicationStage.STAGEOP_INTERNAL_REPLICATION_REQUEST_FINISHED, + new StageCallbackInterface() { + + public void methodExecutionCompleted( + OSDRequest request, + StageResponseCode result) { + postInitiatingNextReplicationSteps(request, + result); + } + }); + } else { + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "writing object " + + rq.getDetails().getFileId() + ":" + + rq.getDetails().getObjectNumber() + " failed"); + master.requestFinished(rq); + // TODO: handling for original request + } + } + + private void postInitiatingNextReplicationSteps(OSDRequest rq, + StageResponseCode result) { + // do nothing except cleanup + System.out.println(rq.getRequestId() + ": cleanup"); + cleanUp(rq); + } + + private void cleanUp(OSDRequest rq) { + if (rq.getData() != null) + BufferPool.free(rq.getData()); + } +} diff --git a/servers/src/org/xtreemfs/osd/ops/FetchGmaxRPC.java b/servers/src/org/xtreemfs/osd/ops/FetchGmaxRPC.java new file mode 100644 index 0000000000000000000000000000000000000000..ff3fbe8aa82c4ba72e7d01b384480aa2ff67dcf2 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/FetchGmaxRPC.java @@ -0,0 +1,57 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.ops; + +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.StageCallbackInterface; +import org.xtreemfs.osd.stages.StorageThread; +import org.xtreemfs.osd.stages.Stage.StageResponseCode; + +public final class FetchGmaxRPC extends Operation { + + public FetchGmaxRPC(RequestDispatcher master) { + super(master); + } + + @Override + public void startRequest(OSDRequest rq) { + + master.getStage(Stages.STORAGE).enqueueOperation(rq, + StorageThread.STAGEOP_FETCH_GMAX, new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, + StageResponseCode result) { + requestFinished(request, result); + } + }); + } + + private void requestFinished(OSDRequest request, StageResponseCode result) { + master.requestFinished(request); + } + +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/osd/ops/GetProtocolVersionOperation.java b/servers/src/org/xtreemfs/osd/ops/GetProtocolVersionOperation.java new file mode 100644 index 0000000000000000000000000000000000000000..c7b125e2063c6605c125f8537184b975877fde1e --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/GetProtocolVersionOperation.java @@ -0,0 +1,73 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer 
Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.ops; + +import java.util.ArrayList; +import java.util.List; + +import org.xtreemfs.common.VersionManagement; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.foundation.pinky.HTTPUtils.DATA_TYPE; +import org.xtreemfs.osd.ErrorRecord; +import org.xtreemfs.osd.OSDRequestDispatcher; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.ErrorRecord.ErrorClass; + +public final class GetProtocolVersionOperation extends Operation { + + public GetProtocolVersionOperation(OSDRequestDispatcher master) { + super(master); + } + + public void startRequest(OSDRequest rq) { + final long ver = (Long) rq.getAttachment(); + rq.setData(ReusableBuffer.wrap(Long.toString(ver).getBytes()), + DATA_TYPE.JSON); + finishRequest(rq); + } + + public ErrorRecord parseRPCBody(OSDRequest rq, List arguments) { + + List versions = new ArrayList(arguments.size()); + for (Object arg : arguments) { + if (arg instanceof Long) + versions.add((Long) arg); + else + return new ErrorRecord(ErrorClass.USER_EXCEPTION, + "invalid version 
number: " + arg); + } + + final long result = VersionManagement.getMatchingProtVers(versions); + + if (result == -1) + return new ErrorRecord(ErrorClass.USER_EXCEPTION, + "No matching protocol version found. Server supports " + + VersionManagement.getSupportedProtVersAsString()); + + rq.setAttachment(result); + return null; + } + +} diff --git a/servers/src/org/xtreemfs/osd/ops/GetStatistics.java b/servers/src/org/xtreemfs/osd/ops/GetStatistics.java new file mode 100644 index 0000000000000000000000000000000000000000..cc79e1db275fab88fd28371fc6f7295c00c22838 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/GetStatistics.java @@ -0,0 +1,34 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. + */ + +package org.xtreemfs.osd.ops; + +import org.xtreemfs.osd.OSDRequestDispatcher; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.Stage.StageResponseCode; +import org.xtreemfs.osd.stages.StageCallbackInterface; +import org.xtreemfs.osd.stages.StatisticsStage; + +/** + * + * @author bjko + */ +public final class GetStatistics extends Operation { + + public GetStatistics(OSDRequestDispatcher master) { + super(master); + } + + @Override + public void startRequest(OSDRequest rq) { + master.getStage(Stages.STATS).enqueueOperation(rq, StatisticsStage.STAGEOP_STATISTICS, new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, StageResponseCode result) { + master.requestFinished(request); + } + }); + } +} diff --git a/servers/src/org/xtreemfs/osd/ops/GmaxEvent.java b/servers/src/org/xtreemfs/osd/ops/GmaxEvent.java new file mode 100644 index 0000000000000000000000000000000000000000..6bacb70dd2a7085c576d2d30b02bb12e96539054 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/GmaxEvent.java @@ -0,0 +1,44 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. 
XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.ops; + +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.StorageThread; + +public class GmaxEvent extends Operation { + + public GmaxEvent(RequestDispatcher master) { + super(master); + } + + @Override + public void startRequest(OSDRequest rq) { + master.getStage(Stages.STORAGE).enqueueOperation(rq, + StorageThread.STAGEOP_PROCESS_GMAX_EVENT, null); + } + +} diff --git a/servers/src/org/xtreemfs/osd/ops/Operation.java b/servers/src/org/xtreemfs/osd/ops/Operation.java new file mode 100644 index 0000000000000000000000000000000000000000..e6173a696706ca918221b32883779eaafd0ed0c9 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/Operation.java @@ -0,0 +1,74 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. 
+ The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.osd.ops; + +import java.util.List; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.osd.ErrorRecord; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.ErrorRecord.ErrorClass; + + +public abstract class Operation { + + + protected RequestDispatcher master; + + + public Operation(RequestDispatcher master) { + this.master = master; + } + + /** + * called after request was parsed and operation assigned. + * @param rq the new request + */ + public abstract void startRequest(OSDRequest rq); + + /** + * Parses and inspects the JSON RPC arguments. 
+ * @param rq the request + * @param arguments the JSON RPC arguments + * @return null if successful, error message otherwise + */ + public ErrorRecord parseRPCBody(OSDRequest rq, List arguments) { + return null; + } + + protected void sendInternalError(OSDRequest rq, String message) { + Logging.logMessage(Logging.LEVEL_ERROR,this,message+" / request: "+rq); + rq.setError(new ErrorRecord(ErrorClass.INTERNAL_SERVER_ERROR, message)); + master.requestFinished(rq); + } + + protected void finishRequest(OSDRequest rq) { + master.requestFinished(rq); + } + + + +} diff --git a/servers/src/org/xtreemfs/osd/ops/ReadOperation.java b/servers/src/org/xtreemfs/osd/ops/ReadOperation.java new file mode 100644 index 0000000000000000000000000000000000000000..830f5ecf22075c25d98ca5a6dffd7917f6217414 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/ReadOperation.java @@ -0,0 +1,123 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.ops; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.AuthenticationStage; +import org.xtreemfs.osd.stages.ReplicationStage; +import org.xtreemfs.osd.stages.StageCallbackInterface; +import org.xtreemfs.osd.stages.StorageThread; +import org.xtreemfs.osd.stages.Stage.StageResponseCode; + +public final class ReadOperation extends Operation { + + public ReadOperation(RequestDispatcher master) { + super(master); + } + + /** + * Start the operation for a request + * + * @param rq + * the request + */ + @Override + public void startRequest(OSDRequest rq) { + + // use anoymous inner classes impl + master.getStage(Stages.AUTH).enqueueOperation( + rq, + AuthenticationStage.STAGEOP_AUTHENTICATE + | AuthenticationStage.STAGEOP_OFT_OPEN, + new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, + StageResponseCode result) { + postAuthenticate(request, result); + } + }); + } + + /** + * called after the authentication stage has processed the request + * + * @param rq + * the request + * @param result + * authentication stage result + * @param error + * error details or null + */ + protected void postAuthenticate(OSDRequest rq, StageResponseCode result) { + + if (result == StageResponseCode.OK) { + + master.getStage(Stages.STORAGE).enqueueOperation(rq, + StorageThread.STAGEOP_READ_OBJECT, + new StageCallbackInterface() { + + public void methodExecutionCompleted( + OSDRequest request, StageResponseCode result) { + postRead(request, result); + } + }); + + } else { + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "authentication failed for " + rq.getRequestId()); + master.requestFinished(rq); + } + } + + private void postRead(final OSDRequest rq, StageResponseCode 
result) { + + // FIXME: comment this out to enable replication +// if (result == StageResponseCode.OK) + master.requestFinished(rq); +/* else if (result == StageResponseCode.FAILED) { + master.getStage(Stages.REPLICATION).enqueueOperation(rq, + ReplicationStage.STAGEOP_FETCH_OBJECT, + new StageCallbackInterface() { + + public void methodExecutionCompleted( + OSDRequest request, StageResponseCode result) { + // FIXME: testcode + System.out.println(rq.getRequestId() + + ": process original callback"); + postFetchObject(request, result); + } + }); + }*/ + } + + private void postFetchObject(OSDRequest rq, StageResponseCode result) { + master.requestFinished(rq); + } + +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/osd/ops/ReturnLease.java b/servers/src/org/xtreemfs/osd/ops/ReturnLease.java new file mode 100644 index 0000000000000000000000000000000000000000..fabb292e8eab9999e9b1f41a175413978e21bb87 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/ReturnLease.java @@ -0,0 +1,75 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. 
+ */ + +package org.xtreemfs.osd.ops; + +import java.util.List; +import org.xtreemfs.common.ClientLease; +import org.xtreemfs.osd.ErrorCodes; +import org.xtreemfs.osd.ErrorRecord; +import org.xtreemfs.osd.ErrorRecord.ErrorClass; +import org.xtreemfs.osd.OSDRequestDispatcher; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.AuthenticationStage; +import org.xtreemfs.osd.stages.Stage.StageResponseCode; +import org.xtreemfs.osd.stages.StageCallbackInterface; + +/** + * + * @author bjko + */ +public class ReturnLease extends Operation { + + public ReturnLease(OSDRequestDispatcher master) { + super(master); + } + + + public void startRequest(OSDRequest rq) { + master.getStage(Stages.AUTH).enqueueOperation(rq, AuthenticationStage.STAGEOP_AUTHENTICATE | AuthenticationStage.STAGEOP_OFT_OPEN | AuthenticationStage.STAGEOP_ACQUIRE_LEASE, new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, StageResponseCode result) { + postAuthenticate(request); + } + }); + } + + public void postAuthenticate(OSDRequest rq) { + + master.getStage(Stages.AUTH).enqueueOperation(rq, AuthenticationStage.STAGEOP_RETURN_LEASE, new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, StageResponseCode result) { + master.requestFinished(request); + } + }); + + } + + /** + * Parses and inspects the JSON RPC arguments. 
+ * + * @param rq + * the request + * @param arguments + * the JSON RPC arguments + * @return null if successful, error message otherwise + */ + @Override + public ErrorRecord parseRPCBody(OSDRequest rq, List arguments) { + try { + final ClientLease l = ClientLease.parseFromList(arguments); + rq.getDetails().setLease(l); + + return null; + } catch (Exception ex) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_RPC, + "body is not well-formatted or does not contain valid arguments", ex); + } + } + + + +} diff --git a/servers/src/org/xtreemfs/osd/ops/ShutdownOperation.java b/servers/src/org/xtreemfs/osd/ops/ShutdownOperation.java new file mode 100644 index 0000000000000000000000000000000000000000..75b0438ffb0e7432e04bb34c6fff2b83a4e5e24b --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/ShutdownOperation.java @@ -0,0 +1,49 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.ops; + +import org.xtreemfs.osd.OSDRequestDispatcher; +import org.xtreemfs.osd.OSDRequest; + +public final class ShutdownOperation extends Operation { + + public ShutdownOperation(OSDRequestDispatcher master) { + super(master); + } + + public void startRequest(OSDRequest rq) { + + // send response to client + finishRequest(rq); + + // initiate OSD shutdown + new Thread() { + public void run() { + master.shutdown(); + } + }.start(); + } + +} diff --git a/servers/src/org/xtreemfs/osd/ops/StatisticsConfig.java b/servers/src/org/xtreemfs/osd/ops/StatisticsConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..7cb3ebd31f6f5c772f85446becfb50be77e64504 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/StatisticsConfig.java @@ -0,0 +1,66 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. + */ + +package org.xtreemfs.osd.ops; + +import java.util.List; +import org.xtreemfs.osd.ErrorCodes; +import org.xtreemfs.osd.ErrorRecord; +import org.xtreemfs.osd.ErrorRecord.ErrorClass; +import org.xtreemfs.osd.OSDRequestDispatcher; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.Stage.StageResponseCode; +import org.xtreemfs.osd.stages.StageCallbackInterface; +import org.xtreemfs.osd.stages.StatisticsStage; + +/** + * + * @author bjko + */ +public class StatisticsConfig extends Operation { + + public StatisticsConfig(OSDRequestDispatcher master) { + super(master); + } + + + public void startRequest(OSDRequest rq) { + master.getStage(Stages.STATS).enqueueOperation(rq, StatisticsStage.STAGEOP_MEASURE_RQT, new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, StageResponseCode result) { + master.requestFinished(request); + } + }); + } + + /** + * Parses and inspects the JSON RPC arguments. 
+ * + * @param rq + * the request + * @param arguments + * the JSON RPC arguments + * @return null if successful, error message otherwise + */ + @Override + public ErrorRecord parseRPCBody(OSDRequest rq, List arguments) { + try { + if (arguments.size() > 0) { + final Boolean[] settings = new Boolean[2]; + settings[0] = (Boolean) arguments.get(0); + settings[1] = (Boolean) arguments.get(1); + rq.setAttachment(settings); + } + return null; + } catch (Exception ex) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_RPC, + "body is not well-formatted or does not contain valid arguments", ex); + } + } + + + +} diff --git a/servers/src/org/xtreemfs/osd/ops/StatusPageOperation.java b/servers/src/org/xtreemfs/osd/ops/StatusPageOperation.java new file mode 100644 index 0000000000000000000000000000000000000000..ca390951c0fcb8ee4fb1e328bfba7f3d68aa9dea --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/StatusPageOperation.java @@ -0,0 +1,213 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.osd.ops; + +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.Date; + +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.OutputUtils; +import org.xtreemfs.common.uuids.UUIDResolver; +import org.xtreemfs.foundation.pinky.HTTPUtils.DATA_TYPE; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.osd.OSDRequestDispatcher; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.stages.AuthenticationStage; +import org.xtreemfs.osd.stages.StatisticsStage; + +public final class StatusPageOperation extends Operation { + + protected final String statusPageTemplate; + + private enum Vars { + UUID(""), + MAXMEM(""), + FREEMEM(""), + AVAILPROCS(""), + BPSTATS(""), + PORT(""), + DIRURL(""), + DEBUG(""), + NUMCON(""), + PINKYQ(""), + PARSERQ(""), + AUTHQ(""), + STORAGEQ(""), + DELETIONQ(""), + OPENFILES(""), + OBJWRITE(""), + OBJREAD(""), + BYTETX(""), + BYTERX(""), + GMAXMSG(""), + GMAXRPC(""), + DELETES(""), + GLOBALTIME(""), + GLOBALRESYNC(""), + LOCALTIME(""), + LOCALRESYNC(""), + MEMSTAT(""), + UUIDCACHE(""), + STATCOLLECT(""), + DISKFREE(""); + + private String template; + + Vars(String template) { + this.template = template; + } + + public String toString() { + return template; + } + } + + public StatusPageOperation(OSDRequestDispatcher master) { + + super(master); + + StringBuffer sb = null; + try { + InputStream is = this.getClass().getClassLoader().getResourceAsStream( + "/org/xtreemfs/osd/templates/status.html"); + if (is == null) + is = this.getClass().getClassLoader().getResourceAsStream( + "org/xtreemfs/osd/templates/status.html"); + if (is == null) + is = 
this.getClass().getResourceAsStream("../templates/status.html"); + BufferedReader br = new BufferedReader(new InputStreamReader(is)); + sb = new StringBuffer(); + String line = br.readLine(); + while (line != null) { + sb.append(line + "\n"); + line = br.readLine(); + } + br.close(); + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, ex); + } + if (sb == null) { + statusPageTemplate = "

Template was not found, unable to show status page!

"; + } else { + statusPageTemplate = sb.toString(); + } + } + + public void startRequest(OSDRequest rq) { + PinkyRequest pr = rq.getPinkyRequest(); + //if (pr.requestAuthentication("admin","yagga")) { + final String html = printStatusPage(); + rq.setData(ReusableBuffer.wrap(html.getBytes()), DATA_TYPE.HTML); + //} + finishRequest(rq); + } + + public String printStatusPage() { + + long globalTime = TimeSync.getGlobalTime(); + long localTime = TimeSync.getLocalSystemTime(); + + String tmp = statusPageTemplate.replace(Vars.AVAILPROCS.toString(), Runtime.getRuntime() + .availableProcessors() + + " bytes"); + tmp = tmp.replace(Vars.FREEMEM.toString(), Runtime.getRuntime().freeMemory() + " bytes"); + tmp = tmp.replace(Vars.MAXMEM.toString(), Runtime.getRuntime().maxMemory() + " bytes"); + tmp = tmp.replace(Vars.BPSTATS.toString(), BufferPool.getStatus()); + tmp = tmp.replace(Vars.UUID.toString(), master.getConfig().getUUID().toString()); + tmp = tmp.replace(Vars.PORT.toString(), Integer.toString(master.getConfig().getPort())); + tmp = tmp.replace(Vars.DIRURL.toString(), "http://" + + master.getConfig().getDirectoryService().getHostName() + ":" + + master.getConfig().getDirectoryService().getPort()); + tmp = tmp.replace(Vars.DEBUG.toString(), Integer.toString(master.getConfig() + .getDebugLevel())); + tmp = tmp.replace(Vars.NUMCON.toString(), Integer.toString(((OSDRequestDispatcher) master) + .getPinkyCons())); + tmp = tmp.replace(Vars.PINKYQ.toString(), Integer.toString(((OSDRequestDispatcher) master) + .getPinkyQueueLength())); + tmp = tmp.replace(Vars.PARSERQ.toString(), Integer.toString(master.getStage( + RequestDispatcher.Stages.PARSER).getQueueLength())); + tmp = tmp.replace(Vars.AUTHQ.toString(), Integer.toString(master.getStage( + RequestDispatcher.Stages.AUTH).getQueueLength())); + tmp = tmp.replace(Vars.STORAGEQ.toString(), Integer.toString(master.getStage( + RequestDispatcher.Stages.STORAGE).getQueueLength())); + tmp = tmp.replace(Vars.DELETIONQ.toString(), 
Integer.toString(master.getStage( + RequestDispatcher.Stages.DELETION).getQueueLength())); + tmp = tmp.replace(Vars.OPENFILES.toString(), Integer.toString(((AuthenticationStage) master + .getStage(RequestDispatcher.Stages.AUTH)).getNumOpenFiles())); + tmp = tmp.replace(Vars.OBJWRITE.toString(), master.getStatistics().numWrites.toString()); + tmp = tmp.replace(Vars.OBJREAD.toString(), master.getStatistics().numReads.toString()); + tmp = tmp.replace(Vars.BYTETX.toString(), OutputUtils + .formatBytes(master.getStatistics().bytesTX.get())); + tmp = tmp.replace(Vars.BYTERX.toString(), OutputUtils + .formatBytes(master.getStatistics().bytesRX.get())); + tmp = tmp.replace(Vars.GMAXMSG.toString(), master.getStatistics().numGmaxReceived + .toString()); + tmp = tmp.replace(Vars.GMAXRPC.toString(), master.getStatistics().numGmaxRPCs.toString()); + tmp = tmp + .replace(Vars.DELETES.toString(), Long.toString(master.getStatistics().numDeletes)); + tmp = tmp.replace(Vars.GLOBALTIME.toString(), new Date(globalTime).toString() + " (" + + globalTime + ")"); + tmp = tmp.replace(Vars.GLOBALRESYNC.toString(), Long.toString(TimeSync + .getTimeSyncInterval())); + tmp = tmp.replace(Vars.LOCALTIME.toString(), new Date(localTime).toString() + " (" + + localTime + ")"); + tmp = tmp.replace(Vars.LOCALRESYNC.toString(), Long.toString(TimeSync + .getLocalRenewInterval())); + tmp = tmp.replace(Vars.UUIDCACHE.toString(), UUIDResolver.getCache()); + tmp = tmp.replace(Vars.STATCOLLECT.toString(), "basic stats: "+(StatisticsStage.collect_statistics ? "enabled" : "disabled")+ + "
per stage request details: "+(StatisticsStage.measure_request_times ? "enabled" : "disabled")); + + long freeMem = Runtime.getRuntime().freeMemory(); + String span = ""; + if (freeMem < 1024 * 1024 * 32) { + span = ""; + } else if (freeMem < 1024 * 1024 * 2) { + span = ""; + } + tmp = tmp.replace(Vars.MEMSTAT.toString(), span + OutputUtils.formatBytes(freeMem) + " / " + + OutputUtils.formatBytes(Runtime.getRuntime().maxMemory()) + " / " + + OutputUtils.formatBytes(Runtime.getRuntime().totalMemory()) + ""); + + long freeDisk = ((OSDRequestDispatcher) master).getFreeSpace(); + + span = ""; + if (freeDisk < 1024 * 1024 * 1024 * 2) { + span = ""; + } else if (freeDisk < 1024 * 1024 * 512) { + span = ""; + } + tmp = tmp.replace(Vars.DISKFREE.toString(), span + OutputUtils.formatBytes(freeDisk) + + ""); + + return tmp; + } + +} diff --git a/servers/src/org/xtreemfs/osd/ops/TruncateLocalRPC.java b/servers/src/org/xtreemfs/osd/ops/TruncateLocalRPC.java new file mode 100644 index 0000000000000000000000000000000000000000..8842751d76a0a21a8e3f5bdce7f249aa244a91ea --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/TruncateLocalRPC.java @@ -0,0 +1,115 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
 */

package org.xtreemfs.osd.ops;

import java.util.List;

import org.xtreemfs.common.logging.Logging;
import org.xtreemfs.osd.ErrorCodes;
import org.xtreemfs.osd.ErrorRecord;
import org.xtreemfs.osd.OSDRequest;
import org.xtreemfs.osd.RequestDispatcher;
import org.xtreemfs.osd.ErrorRecord.ErrorClass;
import org.xtreemfs.osd.RequestDispatcher.Stages;
import org.xtreemfs.osd.stages.AuthenticationStage;
import org.xtreemfs.osd.stages.ParserStage;
import org.xtreemfs.osd.stages.StageCallbackInterface;
import org.xtreemfs.osd.stages.StorageThread;
import org.xtreemfs.osd.stages.Stage.StageResponseCode;

/**
 * OSD operation that truncates a file locally (without propagating to other
 * replicas): authenticate and open the file, then hand the request to the
 * storage stage for the actual truncate.
 */
public class TruncateLocalRPC extends Operation {

    public TruncateLocalRPC(RequestDispatcher master) {
        super(master);
    }

    /**
     * Parses and inspects the JSON RPC arguments: [fileId, newFileSize].
     *
     * @param rq
     *            the request
     * @param arguments
     *            the JSON RPC arguments
     * @return null if successful, an error record otherwise
     */
    @Override
    public ErrorRecord parseRPCBody(OSDRequest rq, List arguments) {
        try {
            final String fileId = (String) arguments.get(0);
            final long targetSize = (Long) arguments.get(1);

            if (!ParserStage.validateFileId(fileId))
                return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_FILEID,
                    "fileId in JSON body contains invalid characters");

            rq.getDetails().setFileId(fileId);
            rq.getDetails().setTruncateFileSize(targetSize);
            return null;
        } catch (Exception exc) {
            return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_RPC,
                "body is not well-formatted or does not contain valid arguments", exc);
        }
    }

    @Override
    public void startRequest(OSDRequest rq) {
        // authenticate and register the file as open before touching storage
        final StageCallbackInterface afterAuth = new StageCallbackInterface() {
            public void methodExecutionCompleted(OSDRequest request, StageResponseCode result) {
                postAuthenticate(request, result);
            }
        };
        master.getStage(Stages.AUTH).enqueueOperation(rq,
            AuthenticationStage.STAGEOP_AUTHENTICATE | AuthenticationStage.STAGEOP_OFT_OPEN,
            afterAuth);
    }

    /** Invoked after the authentication stage has processed the request. */
    protected void postAuthenticate(OSDRequest rq, StageResponseCode result) {

        if (result != StageResponseCode.OK) {
            if (Logging.isDebug())
                Logging.logMessage(Logging.LEVEL_DEBUG, this, "authentication failed for "
                    + rq.getRequestId());
            master.requestFinished(rq);
            return;
        }

        final StageCallbackInterface afterTruncate = new StageCallbackInterface() {
            public void methodExecutionCompleted(OSDRequest request, StageResponseCode result) {
                postTruncate(request, result);
            }
        };
        master.getStage(Stages.STORAGE).enqueueOperation(rq,
            StorageThread.STAGEOP_TRUNCATE_LOCAL, afterTruncate);
    }

    /** Truncate finished (successfully or not); hand back to the dispatcher. */
    private void postTruncate(OSDRequest request, StageResponseCode result) {
        master.requestFinished(request);
    }

}
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
 */

package org.xtreemfs.osd.ops;

import java.util.List;

import org.xtreemfs.common.logging.Logging;
import org.xtreemfs.osd.ErrorCodes;
import org.xtreemfs.osd.ErrorRecord;
import org.xtreemfs.osd.OSDRequest;
import org.xtreemfs.osd.RequestDispatcher;
import org.xtreemfs.osd.ErrorRecord.ErrorClass;
import org.xtreemfs.osd.RequestDispatcher.Stages;
import org.xtreemfs.osd.stages.AuthenticationStage;
import org.xtreemfs.osd.stages.ParserStage;
import org.xtreemfs.osd.stages.StageCallbackInterface;
import org.xtreemfs.osd.stages.StorageThread;
import org.xtreemfs.osd.stages.Stage.StageResponseCode;

/**
 * OSD operation that truncates a file: authenticate and open the file,
 * then hand the request to the storage stage for the actual truncate.
 */
public class TruncateRPC extends Operation {

    public TruncateRPC(RequestDispatcher master) {
        super(master);
    }

    /**
     * Parses and inspects the JSON RPC arguments: [fileId, newFileSize].
     *
     * @param rq
     *            the request
     * @param arguments
     *            the JSON RPC arguments
     * @return null if successful, an error record otherwise
     */
    @Override
    public ErrorRecord parseRPCBody(OSDRequest rq, List arguments) {
        try {
            final String fileId = (String) arguments.get(0);
            final long targetSize = (Long) arguments.get(1);

            if (!ParserStage.validateFileId(fileId))
                return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_FILEID,
                    "fileId in JSON body contains invalid characters");

            rq.getDetails().setFileId(fileId);
            rq.getDetails().setTruncateFileSize(targetSize);
            return null;
        } catch (Exception exc) {
            return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_RPC,
                "body is not well-formatted or does not contain valid arguments", exc);
        }
    }

    @Override
    public void startRequest(OSDRequest rq) {
        // authenticate and register the file as open before touching storage
        final StageCallbackInterface afterAuth = new StageCallbackInterface() {
            public void methodExecutionCompleted(OSDRequest request, StageResponseCode result) {
                postAuthenticate(request, result);
            }
        };
        master.getStage(Stages.AUTH).enqueueOperation(rq,
            AuthenticationStage.STAGEOP_AUTHENTICATE | AuthenticationStage.STAGEOP_OFT_OPEN,
            afterAuth);
    }

    /** Invoked after the authentication stage has processed the request. */
    protected void postAuthenticate(OSDRequest rq, StageResponseCode result) {

        if (result != StageResponseCode.OK) {
            if (Logging.isDebug())
                Logging.logMessage(Logging.LEVEL_DEBUG, this, "authentication failed for "
                    + rq.getRequestId());
            master.requestFinished(rq);
            return;
        }

        final StageCallbackInterface afterTruncate = new StageCallbackInterface() {
            public void methodExecutionCompleted(OSDRequest request, StageResponseCode result) {
                postTruncate(request, result);
            }
        };
        master.getStage(Stages.STORAGE).enqueueOperation(rq, StorageThread.STAGEOP_TRUNCATE,
            afterTruncate);
    }

    /** Truncate finished (successfully or not); hand back to the dispatcher. */
    private void postTruncate(OSDRequest request, StageResponseCode result) {
        master.requestFinished(request);
    }

}
b/servers/src/org/xtreemfs/osd/ops/WriteOperation.java new file mode 100644 index 0000000000000000000000000000000000000000..10d789dff3ad7943ba5792e1167b6f5cd4b52ab3 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/ops/WriteOperation.java @@ -0,0 +1,103 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.osd.ops; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.stages.AuthenticationStage; +import org.xtreemfs.osd.stages.StageCallbackInterface; +import org.xtreemfs.osd.stages.StorageThread; +import org.xtreemfs.osd.stages.Stage.StageResponseCode; + +public final class WriteOperation extends Operation { + + public WriteOperation(RequestDispatcher master) { + super(master); + } + + /** + * Start the operation for a request + * + * @param rq + * the request + */ + @Override + public void startRequest(OSDRequest rq) { + + // use anoymous inner classes impl + master.getStage(Stages.AUTH).enqueueOperation( + rq, + AuthenticationStage.STAGEOP_AUTHENTICATE + | AuthenticationStage.STAGEOP_OFT_OPEN, + new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, + StageResponseCode result) { + postAuthenticate(request, result); + } + }); + } + + /** + * called after the authentication stage has processed the request + * + * @param rq + * the request + * @param result + * authentication stage result + * @param error + * error details or null + */ + protected void postAuthenticate(OSDRequest rq, StageResponseCode result) { + + if (result == StageResponseCode.OK) { + + assert(rq.getDetails().getCowPolicy() != null); + + master.getStage(Stages.STORAGE).enqueueOperation(rq, + StorageThread.STAGEOP_WRITE_OBJECT, + new StageCallbackInterface() { + + public void methodExecutionCompleted(OSDRequest request, + StageResponseCode result) { + postWrite(request, result); + } + }); + + } else { + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "authentication failed for " + rq.getRequestId()); + master.requestFinished(rq); + } + } + + private void postWrite(OSDRequest request, StageResponseCode 
result) { + master.requestFinished(request); + } + +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/osd/replication/ObjectDissemination.java b/servers/src/org/xtreemfs/osd/replication/ObjectDissemination.java new file mode 100644 index 0000000000000000000000000000000000000000..3044467458be7838a1e8bbedb5b8bb2f81c567f9 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/replication/ObjectDissemination.java @@ -0,0 +1,156 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
/*
 * AUTHORS: Christian Lorenz (ZIB)
 */
package org.xtreemfs.osd.replication;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashMap;

import org.xtreemfs.foundation.speedy.SpeedyRequest;
import org.xtreemfs.foundation.speedy.SpeedyResponseListener;
import org.xtreemfs.osd.OSDRequest;
import org.xtreemfs.osd.RequestDetails;
import org.xtreemfs.osd.RequestDispatcher;
import org.xtreemfs.osd.RequestDispatcher.Operations;
import org.xtreemfs.osd.replication.TransferStrategy.NextRequest;
import org.xtreemfs.osd.stages.ReplicationStage;

/**
 * Coordinates fetching of missing objects from remote replicas.
 *
 * 15.09.2008
 *
 * @author clorenz
 */
public class ObjectDissemination {
    private ReplicationStage stage;

    private RequestDispatcher master;

    int id = 100; // FIXME: testcode

    /**
     * Files which are currently being downloaded, possibly in the background
     * (without an actual request). Key: fileID.
     * NOTE(review): generics were lost in extraction; reconstructed as
     * String -> TransferStrategy — verify against the original source.
     */
    private HashMap<String, TransferStrategy> filesInProgress;

    /** Original OSD requests waiting for an object. Key: fileID:objectID. */
    private HashMap<String, OSDRequest> waitingOSDRequests;

    public ObjectDissemination(ReplicationStage stage, RequestDispatcher master) {
        this.stage = stage;
        this.master = master;

        this.filesInProgress = new HashMap<String, TransferStrategy>();
        this.waitingOSDRequests = new HashMap<String, OSDRequest>();
    }

    /** Builds the "fileID:objectID" lookup key used by waitingOSDRequests. */
    private static String objectKey(String fileId, long objectNo) {
        return fileId + ":" + objectNo;
    }

    /**
     * Registers the requested object with the file's transfer strategy
     * (creating one if the file is not yet in progress) and kicks off an
     * internal fetch-and-write-replica request for it.
     */
    public void fetchObject(OSDRequest rq) {
        RequestDetails details = rq.getDetails();
        TransferStrategy strategy = this.filesInProgress.get(details.getFileId());
        if (strategy == null) {
            // file not in progress, so create a new strategy and remember it
            strategy = new SimpleStrategy(details);
            this.filesInProgress.put(details.getFileId(), strategy);
        }
        details.setReplicationTransfer(strategy);

        details.getReplicationTransfer().addRequiredObject(details.getObjectNumber());
        details.getReplicationTransfer().addPreferredObject(details.getObjectNumber());

        // generate a new WriteReplica request and remember the waiting caller
        OSDRequest replicaRq = generateWriteReplicaRequest(details);
        waitingOSDRequests.put(objectKey(details.getFileId(), details.getObjectNumber()), rq);
        prepareRequest(replicaRq);

        // start the new request
        replicaRq.getOperation().startRequest(replicaRq);
    }

    /** Creates an internal FETCH_AND_WRITE_REPLICA request for the file. */
    private OSDRequest generateWriteReplicaRequest(RequestDetails details) {
        OSDRequest replicaRq = new OSDRequest(id++);
        replicaRq.setOperation(master.getOperation(Operations.FETCH_AND_WRITE_REPLICA));
        // TODO: set options for request
        replicaRq.getDetails().setFileId(details.getFileId());
        replicaRq.getDetails().setReplicationTransfer(details.getReplicationTransfer());
        return replicaRq;
    }

    /** Sends the remote read via Speedy; the response re-enters the stage. */
    public void sendFetchObjectRequest(OSDRequest rq) {
        // TODO: set needed options from infos in RequestDetails
        SpeedyRequest speedyRq = new SpeedyRequest("read", "testhost", "", "");
        speedyRq.registerListener(new SpeedyResponseListener() {
            public void receiveRequest(SpeedyRequest theRequest) {
                // TODO
                OSDRequest originalRq = (OSDRequest) theRequest.getOriginalRequest();
                stage.enqueueOperation(originalRq,
                    ReplicationStage.STAGEOP_INTERNAL_OBJECT_FETCHED,
                    originalRq.getCurrentCallback());
            }
        });
        try {
            master.sendSpeedyRequest(rq, speedyRq, new InetSocketAddress(0));
        } catch (IOException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }

    /**
     * Asks the transfer strategy which object to fetch next and, if an
     * original OSD request is waiting for that object, attaches it to the
     * internal request that will fetch it.
     */
    private void prepareRequest(OSDRequest rq) {
        NextRequest next = rq.getDetails().getReplicationTransfer().selectNext();

        rq.getDetails().setObjectNumber(next.objectID);

        final String key = objectKey(rq.getDetails().getFileId(),
            rq.getDetails().getObjectNumber());
        if (this.waitingOSDRequests.containsKey(key)) {
            rq.setOriginalOsdRequest(this.waitingOSDRequests.remove(key));
        }

        // TODO: set options for request
    }

    /** Possibly starts additional background fetch requests. */
    public void prepareRequests(OSDRequest rq) {
        for (int i = 0; i < 4 && this.id < 110; i++) {
            OSDRequest replicaRq = generateWriteReplicaRequest(rq.getDetails());
            prepareRequest(replicaRq);
            // start the new request
            replicaRq.getOperation().startRequest(replicaRq);
        }
    }
}
+ * + * 08.12.2008 + * + * @author clorenz + */ +public class RandomStrategy extends TransferStrategy { + private Random random; + + /** + * @param rqDetails + */ + public RandomStrategy(RequestDetails rqDetails) { + super(rqDetails); + this.random = new Random(); + } + + @Override + public NextRequest selectNext() { + NextRequest next = new NextRequest(); + // first fetch a preferred object + if (!this.preferredObjects.isEmpty()) { + next.objectID = this.preferredObjects.remove(random.nextInt() + % this.preferredObjects.size()); + } else { // fetch an object + if (!this.requiredObjects.isEmpty()) { + next.objectID = this.requiredObjects.remove(random.nextInt() + % this.requiredObjects.size()); + } else + return null; + } + // use random OSD + List osds = this.details.locationList + .getOSDsByObject(next.objectID); + if (!osds.isEmpty()) { + next.osd = osds.get(random.nextInt() % osds.size()); + } else + return null; + + next.requestObjectList = false; + return next; + } + +} diff --git a/servers/src/org/xtreemfs/osd/replication/SimpleStrategy.java b/servers/src/org/xtreemfs/osd/replication/SimpleStrategy.java new file mode 100644 index 0000000000000000000000000000000000000000..cd5ace7873b554785538f66739b426030185f11c --- /dev/null +++ b/servers/src/org/xtreemfs/osd/replication/SimpleStrategy.java @@ -0,0 +1,73 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
/*
 * AUTHORS: Christian Lorenz (ZIB)
 */
package org.xtreemfs.osd.replication;

import java.util.List;

import org.xtreemfs.common.uuids.ServiceUUID;
import org.xtreemfs.osd.RequestDetails;

/**
 * A simple transfer strategy, which fetches the next object and iterates
 * sequentially through the replicas (like Round-Robin).
 *
 * 13.10.2008
 *
 * @author clorenz
 */
public class SimpleStrategy extends TransferStrategy {
    // index of the OSD used for the previous request; -1 before the first one
    private int indexOfLastUsedOSD = -1;

    /**
     * @param rqDetails
     *            details of the request that triggered the replication
     */
    public SimpleStrategy(RequestDetails rqDetails) {
        super(rqDetails);
    }

    /**
     * Picks the first preferred object if any exist, otherwise the first
     * required object, and the next OSD in round-robin order.
     *
     * @return null if no object or no holding OSD is available
     */
    @Override
    public NextRequest selectNext() {
        NextRequest next = new NextRequest();
        // first fetch a preferred object
        if (!this.preferredObjects.isEmpty()) {
            next.objectID = this.preferredObjects.remove(0);
        } else { // fetch an object
            if (!this.requiredObjects.isEmpty()) {
                next.objectID = this.requiredObjects.remove(0);
            } else
                return null;
        }
        // use the next replica relative to the last used replica.
        // FIX: the original "indexOfLastUsedOSD = indexOfLastUsedOSD++ % size"
        // discarded the increment (the assignment overwrites the ++) and,
        // starting from -1, produced osds.get(-1) on the first call.
        List osds = this.details.locationList.getOSDsByObject(next.objectID);
        if (!osds.isEmpty()) {
            this.indexOfLastUsedOSD = (this.indexOfLastUsedOSD + 1) % osds.size();
            next.osd = (ServiceUUID) osds.get(this.indexOfLastUsedOSD);
        } else
            return null;

        next.requestObjectList = false;
        return next;
    }
}
package org.xtreemfs.osd.replication;

/*
 * AUTHORS: Christian Lorenz (ZIB)
 */

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

import org.xtreemfs.common.Capability;
import org.xtreemfs.common.striping.Location;
import org.xtreemfs.common.striping.Locations;
import org.xtreemfs.common.uuids.ServiceUUID;
import org.xtreemfs.osd.RequestDetails;

/**
 * This class provides the basic functionality needed by the different transfer
 * strategies.
 *
 * 09.09.2008
 *
 * @author clorenz
 */
public abstract class TransferStrategy {

    /** Immutable snapshot of the replication-relevant request details. */
    protected class ReplicationDetails {
        String fileId;
        Capability capability;
        Locations locationList;
        Location currentReplica;

        ReplicationDetails(String fileId, Capability capability,
            Locations locationList, Location currentReplica) {
            super();
            this.fileId = fileId;
            this.capability = capability;
            this.locationList = locationList;
            this.currentReplica = currentReplica;
        }
    }

    /** Result of {@link #selectNext()}: which object to fetch from which OSD. */
    public class NextRequest {
        ServiceUUID osd;
        long objectID;
        boolean requestObjectList;
    }

    protected ReplicationDetails details;

    // objects that must be fetched (maybe additionally current)
    protected ArrayList<Long> requiredObjects;

    // objects explicitly requested by a client; served before requiredObjects
    protected ArrayList<Long> preferredObjects;

    // NOTE(review): generics and exact key/value types were lost in
    // extraction; reconstructed from the field names ("aviable" [sic] typo
    // kept for compatibility with existing subclasses) — verify types.
    protected HashMap<ServiceUUID, List<Long>> aviableObjectsOnOSD;
    protected HashMap<Long, List<ServiceUUID>> aviableOSDsForObject;

    /**
     * @param rqDetails
     *            details of the request that triggered the replication
     */
    protected TransferStrategy(RequestDetails rqDetails) {
        super();
        this.details = new ReplicationDetails(rqDetails.getFileId(), rqDetails
                .getCapability(), rqDetails.getLocationList(), rqDetails
                .getCurrentReplica());
        this.requiredObjects = new ArrayList<Long>();
        this.preferredObjects = new ArrayList<Long>();
        this.aviableObjectsOnOSD = new HashMap<ServiceUUID, List<Long>>();
        this.aviableOSDsForObject = new HashMap<Long, List<ServiceUUID>>();
    }

    /**
     * Selects the next object/OSD pair to fetch.
     *
     * @return null, if no object to fetch exists
     */
    public abstract NextRequest selectNext();

    /**
     * Adds an object to the list of required objects.
     * (Uses Long.valueOf instead of the deprecated new Long(..) constructor.)
     *
     * @see java.util.ArrayList#add(java.lang.Object)
     */
    public boolean addRequiredObject(long objectID) {
        return this.requiredObjects.add(Long.valueOf(objectID));
    }

    /**
     * Removes an object from the list of required objects.
     *
     * @see java.util.ArrayList#remove(java.lang.Object)
     */
    public boolean removeRequiredObject(long objectID) {
        return this.requiredObjects.remove(Long.valueOf(objectID));
    }

    /**
     * @see java.util.ArrayList#size()
     */
    public int getRequiredObjectsCount() {
        return this.requiredObjects.size();
    }

    /**
     * Adds an object to the list of preferred objects.
     *
     * @see java.util.ArrayList#add(java.lang.Object)
     */
    public boolean addPreferredObject(long objectID) {
        return this.preferredObjects.add(Long.valueOf(objectID));
    }

    /**
     * Removes an object from the list of preferred objects.
     *
     * @see java.util.ArrayList#remove(java.lang.Object)
     */
    public boolean removePreferredObject(long objectID) {
        return this.preferredObjects.remove(Long.valueOf(objectID));
    }

    /**
     * @see java.util.ArrayList#size()
     */
    public int getPreferredObjectsCount() {
        return this.preferredObjects.size();
    }
}
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.stages; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.Request; +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.osd.ConcurrentFileMap; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.HTTPUtils.DATA_TYPE; +import org.xtreemfs.common.ClientLease; +import org.xtreemfs.osd.ErrorCodes; +import org.xtreemfs.osd.ErrorRecord; +import org.xtreemfs.osd.ErrorRecord.ErrorClass; +import org.xtreemfs.osd.OpenFileTable; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDetails; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.ops.Operation; +import org.xtreemfs.osd.storage.CowPolicy; + +public class AuthenticationStage extends Stage { + + public final static int STAGEOP_AUTHENTICATE = 1 << 0; + + public final static int STAGEOP_OFT_OPEN = 1 << 1; + + public final static int STAGEOP_OFT_DELETE = 1 << 2; + + public final static int STAGEOP_ACQUIRE_LEASE = 1 << 4; + + public final static int STAGEOP_RETURN_LEASE = 1 << 5; + + public final static int STAGEOP_VERIFIY_CLEANUP = 1 << 6; + + private final static long OFT_CLEAN_INTERVAL = 1000 * 60; + + private final static long OFT_OPEN_EXTENSION = 1000 * 30; + + private final Map> capCache; + + private final OpenFileTable oft; + + // time left to next clean op + private long timeToNextOFTclean; + + // last check of the OFT + private long lastOFTcheck; + + 
private final Operation closeOp; + + /** Creates a new instance of AuthenticationStage */ + public AuthenticationStage(RequestDispatcher master) { + + super("OSD Auth Stage"); + + capCache = new HashMap>(); + oft = new OpenFileTable(); + closeOp = master.getOperation(RequestDispatcher.Operations.CLOSE_FILE); + } + + @Override + public void run() { + + notifyStarted(); + + // interval to check the OFT + + timeToNextOFTclean = OFT_CLEAN_INTERVAL; + lastOFTcheck = TimeSync.getLocalSystemTime(); + + while (!quit) { + Request rq = null; + try { + final StageMethod op = q.poll(timeToNextOFTclean, TimeUnit.MILLISECONDS); + + checkOpenFileTable(); + + if (op == null) { + // Logging.logMessage(Logging.LEVEL_DEBUG,this,"no request + // -- timer only"); + continue; + } + + rq = op.getRq(); + + if (Logging.tracingEnabled()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "processing request #" + + rq.getRequestId()); + + processMethod(op); + + } catch (InterruptedException ex) { + break; + } catch (Exception ex) { + if (rq != null) + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "exception occurred while processing:" + rq); + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + notifyCrashed(ex); + break; + } + } + + notifyStopped(); + } + + private void checkOpenFileTable() { + final long tPassed = TimeSync.getLocalSystemTime() - lastOFTcheck; + timeToNextOFTclean = timeToNextOFTclean - tPassed; + // Logging.logMessage(Logging.LEVEL_DEBUG,this,"time to next OFT: + // "+timeToNextOFTclean); + if (timeToNextOFTclean <= 0) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "OpenFileTable clean"); + // do OFT clean + List closedFiles = oft.clean(TimeSync + .getLocalSystemTime()); + // Logging.logMessage(Logging.LEVEL_DEBUG,this,"closing + // "+closedFiles.size()+" files"); + for (OpenFileTable.OpenFileTableEntry entry : closedFiles) { + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "send internal close event for " + + entry.getFileId() + ", deleteOnClose=" + 
entry.isDeleteOnClose()); + capCache.remove(entry.getFileId()); + OSDRequest closeEvent = new OSDRequest(0); + closeEvent.getDetails().setFileId(entry.getFileId()); + closeEvent.setOperation(closeOp); + closeEvent.setAttachment(Boolean.valueOf(entry.isDeleteOnClose())); + closeOp.startRequest(closeEvent); + } + timeToNextOFTclean = OFT_CLEAN_INTERVAL; + } + lastOFTcheck = TimeSync.getLocalSystemTime(); + } + + protected void processMethod(StageMethod m) { + + final int requestedMethod = m.getStageMethod(); + + if ((requestedMethod & STAGEOP_AUTHENTICATE) != 0) { + //for quicker responses + Logging.logMessage(Logging.LEVEL_DEBUG, this,"STAGEOP AUTH"); + processAuthenticate(m); + } + + if ((requestedMethod & STAGEOP_OFT_OPEN) != 0) { + Logging.logMessage(Logging.LEVEL_DEBUG, this,"STAGEOP OPEN"); + final String fId = m.getRq().getDetails().getFileId(); + CowPolicy cowPolicy; + if (oft.contains(fId)) { + cowPolicy = oft.refresh(fId, TimeSync.getLocalSystemTime() + + OFT_OPEN_EXTENSION); + } else { + //find out which COW mode to use + //currently everything is no COW + oft.openFile(fId, TimeSync.getLocalSystemTime() + + OFT_OPEN_EXTENSION,CowPolicy.PolicyNoCow); + cowPolicy = CowPolicy.PolicyNoCow; + } + m.getRq().getDetails().setCowPolicy(cowPolicy); + } + + if ((requestedMethod & STAGEOP_OFT_DELETE) != 0) { + processOFTDelete(m); + } + + if (requestedMethod == STAGEOP_ACQUIRE_LEASE) { + processAcquireLease(m); + } + + if (requestedMethod == STAGEOP_RETURN_LEASE) { + processReturnLease(m); + } + + if (requestedMethod == STAGEOP_VERIFIY_CLEANUP) { + processVerifyCleanup(m); + } + + this.methodExecutionSuccess(m, Stage.StageResponseCode.OK); + } + + /** + * Checks a list of fileIDs in the attachment of the request against the + * open-file-table. 
+ * + * @param m + * @throws JSONException + */ + private void processVerifyCleanup(StageMethod m) { + // check against the o-f-t + ConcurrentFileMap fileList = (ConcurrentFileMap) m.getRq().getAttachment(); + for (String volId : fileList.resolvedVolumeIDSet()) { + for (String fileId : fileList.getFileIDSet(volId)){ + if (oft.isDeleteOnClose(fileId)) + fileList.remove(volId,fileId); + } + } + + Logging.logMessage(Logging.LEVEL_TRACE, this, "CleanUp: all done sending back to client!"); + + // return the files to the application + try { + if (!fileList.isEmpty()){ + m.getRq().setData( + ReusableBuffer.wrap(JSONParser.writeJSON(fileList.getJSONCompatible()).getBytes()), + DATA_TYPE.JSON); + Logging.logMessage(Logging.LEVEL_INFO, this, "\nThere are '" + fileList.size() + + "' Zombies on this OSD."); + methodExecutionSuccess(m, StageResponseCode.FINISH); + }else{ + Logging.logMessage(Logging.LEVEL_INFO, this, "\nThere are no Zombies on this OSD."); + methodExecutionSuccess(m, StageResponseCode.FINISH); + } + } catch (JSONException e) { + methodExecutionSuccess(m, StageResponseCode.FAILED); + } + } + + private void processAuthenticate(StageMethod m) { + + final OSDRequest rq = m.getRq(); + final RequestDetails rqDetails = rq.getDetails(); + final Capability rqCap = rqDetails.getCapability(); + + if (rqCap == null) { + // The request does not need a capability (if so, the ParserStage + // would have added it) + + // @todo FIXME server/client authorization + + Logging.logMessage(Logging.LEVEL_WARN, this, + "Request without capability requirements were authenticated."); + // rq.tAuth = System.currentTimeMillis(); + } else { + + try { + + boolean isValid = false; + // look in capCache + Set cachedCaps = capCache.get(rqCap.getFileId()); + if (cachedCaps != null) { + if (cachedCaps.contains(rqCap.getSignature())) { + isValid = true; + } + } + + if (!isValid) { + isValid = rqCap.isValid(); + if (isValid) { + // add to cache + if (cachedCaps == null) { + cachedCaps = new 
HashSet(); + capCache.put(rqCap.getFileId(), cachedCaps); + } + cachedCaps.add(rqCap.getSignature()); + } + } + + // depending on the result the event listener is sent + if (isValid) { + // rq.tAuth = System.currentTimeMillis(); + // requestAuthenticated(rq, HTTPUtils.SC_OKAY, null); + return; + } else { + // rq.tAuth = System.currentTimeMillis(); + this.methodExecutionFailed(m, new ErrorRecord(ErrorClass.USER_EXCEPTION, + ErrorCodes.AUTH_FAILED, "invalid capability")); + } + + } catch (ClassCastException ex) { + // invalid capability string + // rq.tAuth = System.currentTimeMillis(); + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "X-Capability header is not valid JSON: " + ex.getMessage()); + this.methodExecutionFailed(m, new ErrorRecord( + ErrorRecord.ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_HEADER, + HTTPHeaders.HDR_XCAPABILITY + " is not valid JSON", ex)); + } + } + + } + + private void processAcquireLease(StageMethod m) { + ClientLease l = m.getRq().getDetails().getLease(); + + if (l == null) { + this.methodExecutionFailed(m, new ErrorRecord(ErrorClass.BAD_REQUEST, + ErrorCodes.INVALID_RPC, "expected a lease object")); + } + if ((l.getFileId() == null) || (l.getFileId().length() == 0)) { + this.methodExecutionFailed(m, new ErrorRecord(ErrorClass.BAD_REQUEST, + ErrorCodes.INVALID_RPC, "fileId is required")); + } + final List leases = oft.getLeases(l.getFileId()); + + final long leaseId = l.getSequenceNo(); + + Iterator iter = leases.iterator(); + while (iter.hasNext()) { + final ClientLease other = iter.next(); + if (other.getExpires() < TimeSync.getGlobalTime()) + iter.remove(); + if (other.getSequenceNo() == leaseId) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "known lease Id"); + // renew lease + if (other.getClientId().equals(l.getClientId())) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "renew"); + l = other; + l.setExpires(TimeSync.getGlobalTime() + ClientLease.LEASE_VALIDITY); + sendLease(m, l, false); + return; + } else { + 
Logging.logMessage(Logging.LEVEL_DEBUG, this, "not owner: " + + other.getClientId()); + methodExecutionFailed(m, new ErrorRecord(ErrorClass.USER_EXCEPTION, + ErrorCodes.NOT_LEASE_OWNER, "only the owner can renew a lease (owner is " + + other.getClientId() + ")")); + return; + } + } else { + if (l.isConflicting(other)) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "conflicting"); + // lease cannot be granted, other client has lease + sendLease(m, other, true); + return; + } + } + } + // everything is fine, grant lease + l.setSequenceNo(TimeSync.getGlobalTime()); + l.setExpires(TimeSync.getGlobalTime() + ClientLease.LEASE_VALIDITY); + leases.add(l); + sendLease(m, l, false); + + } + + private void processReturnLease(StageMethod m) { + ClientLease l = m.getRq().getDetails().getLease(); + + if (l == null) { + this.methodExecutionFailed(m, new ErrorRecord(ErrorClass.BAD_REQUEST, + ErrorCodes.INVALID_RPC, "expected a lease object")); + } + if ((l.getFileId() == null) || (l.getFileId().length() == 0)) { + this.methodExecutionFailed(m, new ErrorRecord(ErrorClass.BAD_REQUEST, + ErrorCodes.INVALID_RPC, "fileId is required")); + } + final List leases = oft.getLeases(l.getFileId()); + + final long leaseId = l.getSequenceNo(); + + Iterator iter = leases.iterator(); + while (iter.hasNext()) { + final ClientLease other = iter.next(); + if (other.getExpires() < TimeSync.getGlobalTime()) + iter.remove(); + if (other.getSequenceNo() == leaseId) { + if (other.getClientId().equals(l.getClientId())) { + iter.remove(); + } else { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "not owner: " + + other.getClientId()); + methodExecutionFailed(m, new ErrorRecord(ErrorClass.USER_EXCEPTION, + ErrorCodes.NOT_LEASE_OWNER, "only the owner can renew a lease (owner is " + + other.getClientId() + ")")); + return; + } + } + } + methodExecutionSuccess(m, StageResponseCode.FINISH); + } + + private void sendLease(StageMethod m, ClientLease lease, boolean failed) { + try { + ClientLease tmp = lease; 
+ if (failed) { + tmp = lease.clone(); + tmp.setClientId(null); + tmp.setSequenceNo(0); + } + List rv = new ArrayList(1); + rv.add(tmp.encodeAsMap()); + final String result = JSONParser.writeJSON(rv); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "Lease: " + result); + m.getRq().setData(ReusableBuffer.wrap(result.getBytes(HTTPUtils.ENC_UTF8)), + HTTPUtils.DATA_TYPE.JSON); + methodExecutionSuccess(m, StageResponseCode.FINISH); + } catch (JSONException ex) { + this.methodExecutionFailed(m, new ErrorRecord( + ErrorRecord.ErrorClass.INTERNAL_SERVER_ERROR, "cannot encode Lease object", ex)); + } + } + + private void processOFTDelete(StageMethod m) { + + final boolean deleteOnClose = oft.contains(m.getRq().getDetails().getFileId()); + + // if the file is still open, mark it for a deletion on close + if (deleteOnClose) + oft.setDeleteOnClose(m.getRq().getDetails().getFileId()); + + // set a flag for the Deletion Stage that indicates whether the + // deletion has to be deferred + ((Request) m.getRq().getAttachment()).setAttachment(deleteOnClose); + + this.methodExecutionSuccess(m, Stage.StageResponseCode.OK); + } + + public int getNumOpenFiles() { + return oft.getNumOpenFiles(); + } +} diff --git a/servers/src/org/xtreemfs/osd/stages/DeletionStage.java b/servers/src/org/xtreemfs/osd/stages/DeletionStage.java new file mode 100644 index 0000000000000000000000000000000000000000..9e459fafad94c2992bc0e3fc77f6eaa07afed89d --- /dev/null +++ b/servers/src/org/xtreemfs/osd/stages/DeletionStage.java @@ -0,0 +1,280 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.stages; + +import java.io.IOException; +import java.util.List; + +import java.util.concurrent.LinkedBlockingQueue; +import java.util.logging.Level; +import java.util.logging.Logger; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.foundation.speedy.SpeedyResponseListener; +import org.xtreemfs.osd.ErrorRecord; +import org.xtreemfs.osd.OSDException; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.ErrorRecord.ErrorClass; +import org.xtreemfs.osd.ops.Operation; +import org.xtreemfs.osd.storage.MetadataCache; +import org.xtreemfs.osd.storage.StorageLayout; +import org.xtreemfs.osd.storage.Striping; +import org.xtreemfs.osd.storage.Striping.RPCMessage; + +public class DeletionStage extends Stage { + + public static final int STAGEOP_DELETE_FILE = 0; + + public static final int STAGEOP_CHECK_OPEN_STATE = 1; + + public static final int STAGEOP_DELETE_OBJECTS = 2; + + private MetadataCache cache; + + private StorageLayout layout; + + private Striping striping; + + private RequestDispatcher master; + + private DeleteThread deletor; + + public DeletionStage(RequestDispatcher master, Striping 
striping, MetadataCache cache, + StorageLayout layout) { + + super("OSD Deletion Stage"); + + this.master = master; + this.striping = striping; + this.cache = cache; + this.layout = layout; + + deletor = new DeleteThread(layout); + } + + public void start() { + super.start(); + deletor.start(); + deletor.setPriority(MIN_PRIORITY); + } + + public void shutdown() { + super.shutdown(); + deletor.shutdown(); + } + + @Override + protected void processMethod(StageMethod method) { + + try { + switch (method.getStageMethod()) { + case STAGEOP_DELETE_FILE: + processDisseminateRequests(method); + break; + case STAGEOP_CHECK_OPEN_STATE: + processCheckOpenState(method); + break; + case STAGEOP_DELETE_OBJECTS: + processDeleteObjects(method); + methodExecutionSuccess(method, StageResponseCode.OK); + break; + } + + } catch (OSDException exc) { + methodExecutionFailed(method, exc.getErrorRecord()); + } catch (Exception exc) { + Logging.logMessage(Logging.LEVEL_ERROR, this,exc); + methodExecutionFailed(method, new ErrorRecord( + ErrorRecord.ErrorClass.INTERNAL_SERVER_ERROR, "an error has occurred", exc)); + } + } + + private boolean processDisseminateRequests(StageMethod rq) throws IOException, JSONException { + + final Location currentLoc = rq.getRq().getDetails().getCurrentReplica(); + + // for the sake of robustness, check if contacted OSD is head OSD and + // redirect if necessary + if (!master.isHeadOSD(currentLoc)) { + throw new OSDException(ErrorClass.REDIRECT, currentLoc.getOSDs().get(0).toString()); + } + + if (currentLoc.getWidth() == 1) { + // if no dissemination of delete requests is necessary, immediately + // proceed with next step + processCheckOpenState(rq); + return true; + + } else { + // if requests need to be disseminated, send RPCs to all remote OSDs + sendDeleteRequests(rq); + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "sent delete requests for file " + + rq.getRq().getDetails().getFileId() + " to remote OSDs in " + + 
currentLoc.getOSDs() + ", local OSD is " + master.getConfig().getUUID()); + } + + return false; + } + + private void processCheckOpenState(StageMethod rq) throws IOException, JSONException { + + String fileId = rq.getRq().getDetails().getFileId(); + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "preparing deletion of file " + fileId + + ": sending request to delete file from open file table"); + + // send an internal RPC to the Authentication Stage to initiate the + // file deletion in the open file table and to find out whether the + // object deletion has to be deferred due to a valid open state + final Operation deleteOftRPC = master.getOperation(RequestDispatcher.Operations.OFT_DELETE); + final OSDRequest rpc = new OSDRequest(-1); + rpc.getDetails().setFileId(fileId); + rpc.setOperation(deleteOftRPC); + rpc.setAttachment(rq.getRq()); + deleteOftRPC.startRequest(rpc); + } + + private void processDeleteObjects(StageMethod rq) throws IOException { + + final String fileId = rq.getRq().getDetails().getFileId(); + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "received request for deleting objects of file " + fileId); + + // do not perform an immediate deletion if "delete on close" is set + if (rq.getRq().getAttachment() != null && (Boolean) rq.getRq().getAttachment()) { + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "file " + fileId + + " is still open, object deletion will be deferred"); + return; + } + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "deleting objects of file " + fileId); + + // remove the file info from the storage cache + cache.removeFileInfo(fileId); + + // otherwise, remove all local objects + if (layout.fileExists(fileId)) + deletor.enqueueFileForDeletion(fileId); + else + throw new OSDException(ErrorClass.USER_EXCEPTION, "file " + fileId + "not found"); + } + + private void sendDeleteRequests(StageMethod rq) throws IOException, 
JSONException { + + List deleteReqs = striping.createDeleteRequests(rq.getRq().getDetails()); + + final StageMethod req = rq; + + SpeedyRequest[] reqs = new SpeedyRequest[deleteReqs.size()]; + int i = 0; + for (RPCMessage msg : deleteReqs) { + SpeedyRequest sr = msg.req; + sr.listener = new SpeedyResponseListener() { + + public void receiveRequest(SpeedyRequest theRequest) { + + theRequest.freeBuffer(); + + // count received responses + OSDRequest osdReq = (OSDRequest) theRequest.getOriginalRequest(); + long count = (Long) osdReq.getAttachment(); + count++; + osdReq.setAttachment(count); + + // check if all responses have been received; + // if so, enqueue an operation for the next step + if (count == osdReq.getHttpRequests().length) + enqueueOperation(osdReq, STAGEOP_CHECK_OPEN_STATE, req.getCallback()); + } + + }; + reqs[i++] = sr; + } + + rq.getRq().setHttpRequests(reqs); + rq.getRq().setAttachment(0L); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "sending delete requests to remote OSDs ..."); + + for (RPCMessage msg : deleteReqs) + master.sendSpeedyRequest(rq.getRq(), msg.req, msg.addr); + + master.getStatistics().numDeletes++; + } + + private final static class DeleteThread extends Thread { + + private transient boolean quit; + + private final StorageLayout layout; + private final LinkedBlockingQueue files; + + public DeleteThread(StorageLayout layout) { + quit = false; + this.layout = layout; + files = new LinkedBlockingQueue(); + } + + public void shutdown() { + this.quit = true; + this.interrupt(); + } + + public void enqueueFileForDeletion(String fileID) { + assert(this.isAlive()); + files.add(fileID); + } + + public void run() { + try { + do { + Logging.logMessage(Logging.LEVEL_DEBUG, this,"DeleteThread started"); + final String fileID = files.take(); + try { + layout.deleteFile(fileID); + } catch (IOException ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this,ex); + } + } while (!quit); + } catch (InterruptedException ex) { + //idontcare + } + 
Logging.logMessage(Logging.LEVEL_DEBUG, this,"DeleteThread finished"); + } + + } + +} diff --git a/servers/src/org/xtreemfs/osd/stages/ParserStage.java b/servers/src/org/xtreemfs/osd/stages/ParserStage.java new file mode 100644 index 0000000000000000000000000000000000000000..c1dba48498e172d6e674f0712738eda9b5cf98d8 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/stages/ParserStage.java @@ -0,0 +1,564 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.stages; + +import java.util.List; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.Request; +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.common.uuids.UnknownUUIDException; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.osd.ErrorCodes; +import org.xtreemfs.osd.ErrorRecord; +import org.xtreemfs.osd.LocationsCache; +import org.xtreemfs.osd.RPCTokens; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.ErrorRecord.ErrorClass; +import org.xtreemfs.osd.ops.Operation; + +/** + * Parses HTTP requests and constructs requests plus operation + */ +public final class ParserStage extends Stage { + + /** + * parse request method + */ + public static final int STAGEOP_PARSE = 1; + + /** + * remove cache entry method + */ + public static final int STAGEOP_REMOVE_CACHE_ENTRY = 2; + + /** + * X-Location cache + */ + private final LocationsCache xLocCache; + + /** + * master + */ + private RequestDispatcher master; + + public ParserStage(RequestDispatcher controller) { + super("OSD Parser Stage"); + xLocCache = new LocationsCache(10000); + this.master = controller; + } + + @Override + protected void processMethod(final StageMethod op) { + final OSDRequest rq = op.getRq(); + final int stageOp = op.getStageMethod(); + + if (stageOp == STAGEOP_PARSE) { + final ErrorRecord parseResult = parseMethod(rq); + if (Logging.tracingEnabled()) { + Logging.logMessage(Logging.LEVEL_TRACE, this, "result is : " + 
parseResult); + Logging.logMessage(Logging.LEVEL_DEBUG, this, rq.toString()); + } + + if (parseResult != null) { + rq.setError(parseResult); + master.requestFinished(rq); + } else { + assert (rq.getOperation() != null); + calcRequestDuration(rq); + rq.getOperation().startRequest(rq); + } + } else if (stageOp == STAGEOP_REMOVE_CACHE_ENTRY) { + removeCacheEntry(op); + } + + } + + protected ErrorRecord parseMethod(OSDRequest rq) { + + final PinkyRequest pr = rq.getPinkyRequest(); + + // check method to handle different requests + if (pr.requestMethod.equals(HTTPUtils.GET_TOKEN)) { + + if (pr.requestURI == null) + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_FILEID, + "missing file ID"); + + if (pr.requestURI.equals("/")) { + // status page request + rq.setType(OSDRequest.Type.STATUS_PAGE); + rq.setOperation(master.getOperation(RequestDispatcher.Operations.STATUS_PAGE)); + } else { + // regular get request + rq.setType(OSDRequest.Type.READ); + final String fileId = parseFileIdFromURI(pr); + if (fileId == null) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_FILEID, + "fileId contains invalid characters"); + } + rq.getDetails().setFileId(fileId); + rq.setOperation(master.getOperation(RequestDispatcher.Operations.READ)); + } + + } else if (pr.requestMethod.equals(HTTPUtils.PUT_TOKEN)) { + // write request + rq.setType(OSDRequest.Type.WRITE); + final String fileId = parseFileIdFromURI(pr); + if (fileId == null) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_FILEID, + "fileId contains invalid characters"); + } + rq.getDetails().setFileId(fileId); + rq.setOperation(master.getOperation(RequestDispatcher.Operations.WRITE)); + + } else if (pr.requestMethod.equals(HTTPUtils.DELETE_TOKEN)) { + rq.setType(OSDRequest.Type.DELETE); + final String fileId = parseFileIdFromURI(pr); + if (fileId == null) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_FILEID, + "fileId contains invalid 
characters"); + } + rq.getDetails().setFileId(fileId); + rq.setOperation(master.getOperation(RequestDispatcher.Operations.DELETE)); + + } else if (pr.requestMethod.equals(HTTPUtils.POST_TOKEN)) { + rq.setType(OSDRequest.Type.RPC); + + assert (pr.requestURI != null); + + if (pr.requestURI.length() == 0) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, + ErrorCodes.METHOD_NOT_IMPLEMENTED, + "must specify a method name to execute with POST"); + } + + ErrorRecord result = parseRPC(rq); + if (result != null) + return result; + } else { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.METHOD_NOT_IMPLEMENTED, + pr.requestMethod + " not implemented by this server or invalid"); + } + + // parse headers for all HTTP requests + // this has to be done after the fileID was extracted which depends on + // the request type + final ErrorRecord hdrResult = parseHeaders(rq); + if (hdrResult != null) { + return hdrResult; + } + + return null; + + } + + protected ErrorRecord parseRPC(OSDRequest rq) { + final PinkyRequest pr = rq.getPinkyRequest(); + String methodName; + if (pr.requestURI.charAt(0) == '/') { + if (pr.requestURI.length() == 1) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, + ErrorCodes.METHOD_NOT_IMPLEMENTED, + "must specify a method name to execute with POST"); + } + methodName = pr.requestURI.substring(1); + } else { + methodName = pr.requestURI; + } + + List body = null; + try { + if (rq.getPinkyRequest().requestBody != null) + body = parseJSONBody(rq); + } catch (JSONException ex) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_RPC, + "body contains invalid JSON", ex); + } + + if (methodName.equals(RPCTokens.fetchGlobalMaxToken)) { + final Operation fetchGmax = master + .getOperation(RequestDispatcher.Operations.FETCH_GMAX); + rq.setOperation(fetchGmax); + return null; + } + + else if (methodName.equals(RPCTokens.truncateTOKEN)) { + final Operation truncate = 
master.getOperation(RequestDispatcher.Operations.TRUNCATE); + rq.setOperation(truncate); + ErrorRecord result = truncate.parseRPCBody(rq, body); + if (result != null) + return result; + } + + else if (methodName.equals(RPCTokens.truncateLocalTOKEN)) { + final Operation truncate = master + .getOperation(RequestDispatcher.Operations.TRUNCATE_LOCAL); + rq.setOperation(truncate); + ErrorRecord result = truncate.parseRPCBody(rq, body); + if (result != null) + return result; + } + + else if (methodName.equals(RPCTokens.deleteLocalTOKEN)) { + final Operation deleteLocal = master + .getOperation(RequestDispatcher.Operations.DELETE_LOCAL); + rq.setOperation(deleteLocal); + return null; + } + + else if (methodName.equals(RPCTokens.getProtocolVersionTOKEN)) { + final Operation getProtocolVer = master + .getOperation(RequestDispatcher.Operations.GET_PROTOCOL_VERSION); + rq.setOperation(getProtocolVer); + ErrorRecord result = getProtocolVer.parseRPCBody(rq, body); + if (result != null) + return result; + } + + else if (methodName.equals(RPCTokens.shutdownTOKEN)) { + final Operation shutdown = master.getOperation(RequestDispatcher.Operations.SHUTDOWN); + rq.setOperation(shutdown); + return null; + } + + else if (methodName.equals(RPCTokens.getstatsTOKEN)) { + final Operation stats = master.getOperation(RequestDispatcher.Operations.GET_STATS); + rq.setOperation(stats); + return null; + } + + else if (methodName.equals(RPCTokens.checkObjectTOKEN)) { + final Operation checkObject = master.getOperation(RequestDispatcher.Operations.READ); + rq.setOperation(checkObject); + rq.getDetails().setCheckOnly(true); + return null; + } + + else if (methodName.equals(RPCTokens.recordRqDurationTOKEN)) { + final Operation statConfig = master.getOperation(RequestDispatcher.Operations.STATS_CONFIG); + rq.setOperation(statConfig); + ErrorRecord result = statConfig.parseRPCBody(rq, body); + if (result != null) + return result; + } + + else if (methodName.equals(RPCTokens.acquireLeaseTOKEN)) { + final 
Operation acquireLease = master.getOperation(RequestDispatcher.Operations.ACQUIRE_LEASE); + rq.setOperation(acquireLease); + ErrorRecord result = acquireLease.parseRPCBody(rq, body); + if (result != null) + return result; + } + + else if (methodName.equals(RPCTokens.returnLeaseTOKEN)) { + final Operation returnLease = master.getOperation(RequestDispatcher.Operations.RETURN_LEASE); + rq.setOperation(returnLease); + ErrorRecord result = returnLease.parseRPCBody(rq, body); + if (result != null) + return result; + } + + else if (methodName.equals(RPCTokens.cleanUpTOKEN)) { + final Operation cleanup = master.getOperation(RequestDispatcher.Operations.CLEAN_UP); + rq.setOperation(cleanup); + ErrorRecord result = cleanup.parseRPCBody(rq, body); + if (result != null) + return result; + } + + else { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.METHOD_NOT_IMPLEMENTED, + methodName + " not implemented"); + } + return null; + } + + /** + * Parses the all available headers + * + * @param rq + * the request to parse + * @return null, if parsing was successful, errorMessage otherwise + */ + protected ErrorRecord parseHeaders(OSDRequest rq) { + + final HTTPHeaders hdrs = rq.getPinkyRequest().requestHeaders; + + for (final HTTPHeaders.HeaderEntry hdr : hdrs) { + + if (hdr.name.equalsIgnoreCase(HTTPHeaders.HDR_XOBJECTNUMBER)) { + + try { + rq.getDetails().setObjectNumber(Long.parseLong(hdr.value)); + + if (rq.getDetails().getObjectNumber() < 0) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, + ErrorCodes.INVALID_HEADER, HTTPHeaders.HDR_XOBJECTNUMBER + + " must contain a number >= 0"); + } + + } catch (Exception ex) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_HEADER, + HTTPHeaders.HDR_XOBJECTNUMBER + " must contain a valid integer", ex); + } + + } else if (hdr.name.equalsIgnoreCase(HTTPHeaders.HDR_XCAPABILITY)) { + + try { + Capability cap = new Capability(hdr.value, master.getConfig().getCapabilitySecret()); + 
rq.getDetails().setCapability(cap); + + } catch (JSONException ex) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_HEADER, + HTTPHeaders.HDR_XCAPABILITY + " is not valid JSON", ex); + } catch (Exception ex) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_HEADER, + HTTPHeaders.HDR_XCAPABILITY + " is not valid and cannot be parsed", ex); + } + + } else if (hdr.name.equalsIgnoreCase(HTTPHeaders.HDR_CONTENT_RANGE)) { + + try { + final String rangeHdr = hdr.value.trim(); + // header is of format "bytes start - end /* + + final String rangeOnly = rangeHdr.substring(rangeHdr.indexOf(' '), + rangeHdr.lastIndexOf('/')).trim(); + + final int indexOfMinus = rangeOnly.indexOf('-'); + final String startRange = rangeOnly.substring(0, indexOfMinus).trim(); + final String endRange = rangeOnly.substring(indexOfMinus + 1).trim(); + + rq.getDetails().setByteRangeStart(Long.parseLong(startRange)); + rq.getDetails().setByteRangeEnd(Long.parseLong(endRange)); + rq.getDetails().setRangeRequested(true); + } catch (Exception ex) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_HEADER, + HTTPHeaders.HDR_CONTENT_RANGE + " is not valid and cannot be parsed", ex); + } + + } else if (hdr.name.equalsIgnoreCase(HTTPHeaders.HDR_XLOCATIONS)) { + + final ErrorRecord result = parseLocations(rq, hdr.value); + if (result != null) { + return result; + } + + } else if (hdr.name.equalsIgnoreCase(HTTPHeaders.HDR_XVERSIONNUMBER)) { + + try { + rq.getDetails().setObjectVersionNumber(Integer.parseInt(hdr.value)); + rq.getDetails().setObjectVersionNumberRequested(true); + + if (rq.getDetails().getObjectVersionNumber() < 0) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, + ErrorCodes.INVALID_HEADER, HTTPHeaders.HDR_XVERSIONNUMBER + + " must contain a number >= 0"); + } + + } catch (Exception ex) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_HEADER, + HTTPHeaders.HDR_XOBJECTNUMBER + " must contain a valid 
integer", ex); + } + + } else if (hdr.name.equalsIgnoreCase(HTTPHeaders.HDR_XFILEID)) { + + try { + if (validateFileId(hdr.value)) + rq.getDetails().setFileId(hdr.value); + + } catch (Exception ex) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_HEADER, + HTTPHeaders.HDR_XOBJECTNUMBER + " must contain a valid integer", ex); + } + + } else if (hdr.name.equalsIgnoreCase(HTTPHeaders.HDR_XLEASETO)) { + try { + Long to = Long.valueOf(hdr.value); + if (to < TimeSync.getGlobalTime()) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.LEASE_TIMED_OUT, + "lease timed out"); + } + } catch (NumberFormatException ex) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_HEADER, + HTTPHeaders.HDR_XLEASETO + " must contain a valid integer", ex); + } + } else if (hdr.name.equalsIgnoreCase(HTTPHeaders.HDR_XREQUESTID)) { + rq.getDetails().setRequestId(hdr.value); + } + } + + return null; + } + + /** + * Parse the Locations header + * + * @param rq + * the request + * @param loc + * the locations header string + * @return an error, null if successfull + */ + protected ErrorRecord parseLocations(OSDRequest rq, String loc) { + int locVer = -1; + try { + int lastComma = loc.lastIndexOf(','); + String lastArg = loc.substring(lastComma + 1, loc.length() - 1); + if (lastArg.indexOf('"') >= 0) { + // last arg is replication policy + int sndLastComma = loc.substring(0, lastComma).lastIndexOf(','); + String tmp = loc.substring(sndLastComma + 1, lastComma); + tmp = tmp.trim(); + locVer = Integer.parseInt(tmp); + } else { + // last arg is version number + lastArg = lastArg.trim(); + locVer = Integer.parseInt(lastArg); + } + + } catch (Exception e) { + if (Logging.isDebug()) { + Logging.logMessage(Logging.LEVEL_DEBUG, null, e); + } + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_HEADER, + HTTPHeaders.HDR_XLOCATIONS + " cannot be parsed", e); + } + + Locations cachedLoc = 
xLocCache.getLocations(rq.getDetails().getFileId()); + if ((cachedLoc != null) && (cachedLoc.getVersion() == locVer)) { + rq.getDetails().setLocationList(cachedLoc); + } else { + try { + Locations receivedLoc = new Locations(new JSONString(loc)); + if ((receivedLoc.getNumberOfReplicas() == 0) && (cachedLoc == null)) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.NEED_FULL_XLOC, + "X-Location not in cache, resend full X-Location list"); + } + + if ((cachedLoc != null) && (cachedLoc.getVersion() > locVer)) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.XLOC_OUTDATED, + "outdated X-Location sent, version is " + cachedLoc.getVersion() + " sent " + + locVer); + } + + xLocCache.update(rq.getDetails().getFileId(), receivedLoc); + rq.getDetails().setLocationList(receivedLoc); + } catch (JSONException ex) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_HEADER, + "X-Location header is not valid JSON", ex); + } + } + + //resolve addresses + try { + rq.getDetails().getLocationList().resolveAll(); + } catch (UnknownUUIDException ex) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.INVALID_HEADER, + "Cannot resolve uuid in X-Locations list: "+ex); + } + + // find the location for the currentReplica (i.e. the one this OSD is + // in) + // FIXME: get correct schema! + final Location currentReplica = rq.getDetails().getLocationList().getLocation( + master.getConfig().getUUID()); + if (currentReplica == null) { + return new ErrorRecord(ErrorClass.USER_EXCEPTION, ErrorCodes.NOT_IN_XLOC, "this OSD (" + + master.getConfig().getUUID() + ") is not part of the current X-Location list"); + } + rq.getDetails().setCurrentReplica(currentReplica); + return null; + } + + /** + * Extracts the file ID from the URI. 
+ * + * @param pr + * the Pinky Request + * @return the file ID + */ + private String parseFileIdFromURI(PinkyRequest pr) { + if (pr.requestURI.length() == 0) + return null; + String rqURI = pr.requestURI; + if (pr.requestURI.charAt(0) == '/') { + rqURI = pr.requestURI.substring(1); + } + if (validateFileId(rqURI)) { + return rqURI; + } else { + return null; + } + } + + /** + * Parses the JSON content of the body. + * + * @param rq + * the OSD request + * @return an object representation of the JSON body + * @throws org.xtreemfs.foundation.json.JSONException + */ + private List parseJSONBody(Request rq) throws JSONException { + if (rq.getPinkyRequest().getBody() == null) { + throw new JSONException("body is empty"); + } + return (List) JSONParser.parseJSON(new JSONString(new String(rq.getPinkyRequest() + .getBody()))); + } + + /** + * remove an entry for a file from the X-Location cache + * + * @param op + * StageMethod to execute + */ + private void removeCacheEntry(StageMethod op) { + this.xLocCache.removeLocations(op.getRq().getDetails().getFileId()); + this.methodExecutionSuccess(op, StageResponseCode.OK); + } + + public static boolean validateFileId(String requestURI) { + for (int i = 0; i < requestURI.length(); i++) { + char c = requestURI.charAt(i); + + if ((c < '0') || ((c > ':') && (c < 'A')) || (c > 'F')) { + return false; + } + } + return true; + } +} diff --git a/servers/src/org/xtreemfs/osd/stages/ReplicationStage.java b/servers/src/org/xtreemfs/osd/stages/ReplicationStage.java new file mode 100644 index 0000000000000000000000000000000000000000..54d4da95c325b314655a74f54cd9605e9daa8660 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/stages/ReplicationStage.java @@ -0,0 +1,131 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. 
/**
 * OSD stage coordinating object replication: it initiates fetching of missing
 * objects from remote replicas through the {@link ObjectDissemination} layer
 * and reacts to the internal events produced while a fetch is in progress.
 *
 * 09.09.2008
 *
 * @author clorenz
 */
public class ReplicationStage extends Stage {
    /** external event: an object shall be fetched from a remote replica */
    public static final int STAGEOP_FETCH_OBJECT = 1;

    /** internal event: a fetch-object RPC shall be sent to a remote OSD */
    public static final int STAGEOP_INTERNAL_SEND_FETCH_OBJECT_REQUEST = 2;

    /** internal event: the requested object data has arrived */
    public static final int STAGEOP_INTERNAL_OBJECT_FETCHED = 3;

    /** internal event: a replication request has finished */
    public static final int STAGEOP_INTERNAL_REPLICATION_REQUEST_FINISHED = 4;

    private RequestDispatcher master;

    /** strategy layer deciding from which replica to fetch which object */
    private ObjectDissemination disseminationLayer;

    public ReplicationStage(RequestDispatcher dispatcher) {
        super("OSD Replication Stage");

        this.master = dispatcher;
        this.disseminationLayer = new ObjectDissemination(this, master);
    }

    /*
     * (non-Javadoc)
     *
     * @see org.xtreemfs.osd.stages.Stage#processMethod(org.xtreemfs.osd.stages.Stage.StageMethod)
     */
    @Override
    protected void processMethod(StageMethod method) {
        try {
            switch (method.getStageMethod()) {
            case STAGEOP_FETCH_OBJECT: {
                processFetchObject(method);
                break;
            }
            case STAGEOP_INTERNAL_OBJECT_FETCHED: {
                System.out.println(method.getRq().getRequestId()
                    + ": object fetched"); // FIXME: testcode
                methodExecutionSuccess(method, StageResponseCode.OK);
                break;
            }
            case STAGEOP_INTERNAL_SEND_FETCH_OBJECT_REQUEST: {
                processInternalSendFetchObjectRequest(method);
                break;
            }
            case STAGEOP_INTERNAL_REPLICATION_REQUEST_FINISHED: {
                processInternalReplicationRequestFinished(method);
                methodExecutionSuccess(method, StageResponseCode.OK);
                break;
            }
            default: {
                // NOTE(review): an unknown method code is only printed; the
                // request is never completed via methodExecutionSuccess/
                // methodExecutionFailed, so its callback is never invoked —
                // confirm this is acceptable for dev-only code
                System.out.println(method.getRq().getRequestId()
                    + ": not supported method"); // FIXME: testcode
            }
            }
        } catch (IOException e) {
            // TODO Auto-generated catch block
            // NOTE(review): the exception is swallowed and the request is not
            // failed, leaving it uncompleted — should eventually report an
            // internal server error to the callback
            e.printStackTrace();
        } catch (JSONException e) {
            // TODO Auto-generated catch block
            // NOTE(review): same as above — swallowed exception leaves the
            // request uncompleted
            e.printStackTrace();
        }
    }

    // a previously issued replication request has finished; ask the
    // dissemination layer to schedule the next fetches
    private void processInternalReplicationRequestFinished(StageMethod method) {
        System.out.println(method.getRq().getRequestId()
            + ": initiating next steps"); // FIXME: testcode
        disseminationLayer.prepareRequests(method.getRq());

    }

    // entry point for a new fetch-object request; delegates replica selection
    // to the dissemination layer
    private boolean processFetchObject(StageMethod method) throws IOException,
        JSONException {
        System.out.println(method.getRq().getRequestId()
            + ": want to fetch object"); // FIXME: testcode
        disseminationLayer.fetchObject(method.getRq());
        return true;
    }

    // sends the actual fetch-object RPC for a prepared request
    private void processInternalSendFetchObjectRequest(StageMethod method) {
        // TODO Auto-generated method stub
        disseminationLayer.sendFetchObjectRequest(method.getRq());
        System.out.println(method.getRq().getRequestId() + ": fetch object"); // FIXME:
        // testcode
    }

    /**
     * Enqueues an operation; additionally remembers the callback on the
     * request itself so internal follow-up events can reuse it.
     */
    @Override
    public void enqueueOperation(OSDRequest rq, int method,
        StageCallbackInterface callback) {
        // save callback
        rq.setCurrentCallback(callback);
        super.enqueueOperation(rq, method, callback);
    }

}
/**
 * Base class for all OSD stages. A stage is a single worker thread with a
 * request queue: operations are enqueued together with an integer method code
 * and a callback, processed one at a time by
 * {@link #processMethod(StageMethod)}, and completed by invoking the callback
 * with a {@link StageResponseCode}.
 */
public abstract class Stage extends LifeCycleThread {

    /**
     * global stage response codes. These codes are sent back by a stage to
     * indicate to the operation's state machine, which action to take next.
     *
     * This list contains some basic codes as well as all specialized codes used
     * by individual stages.
     *
     */
    public enum StageResponseCode {
        /**
         * go to next operation
         */
        OK,
        /**
         * request failed, send error
         */
        FAILED,
        /**
         * stay in current state and wait for next event
         */
        WAIT,
        /**
         * finish request by sending the response
         */
        FINISH

    }

    /**
     * queue containing all requests
     */
    protected BlockingQueue<StageMethod> q;

    /**
     * set to true if stage should shut down
     */
    protected volatile boolean quit;

    // per-stage request timing counters; written here, sampled and reset by
    // the StatisticsStage collector thread
    public AtomicInteger _numRq, _maxRqTime, _minRqTime;

    public AtomicLong _sumRqTime;

    public Stage(String stageName) {
        super(stageName);
        q = new LinkedBlockingQueue<StageMethod>();
        this.quit = false;

        _numRq = new AtomicInteger(0);
        _maxRqTime = new AtomicInteger(0);
        _minRqTime = new AtomicInteger(Integer.MAX_VALUE);
        _sumRqTime = new AtomicLong(0);
    }

    /**
     * Enqueues a request for asynchronous execution by this stage.
     *
     * @param rq
     *            the request
     * @param method
     *            the method code identifying the stage operation to execute
     * @param callback
     *            invoked once when processing of the operation has completed
     */
    public void enqueueOperation(OSDRequest rq, int method, StageCallbackInterface callback) {
        // remember the enqueue time for request-duration statistics
        rq.setEnqueueNanos(System.nanoTime());
        q.add(new StageMethod(rq, method, callback));
    }

    /**
     * shut the stage thread down
     */
    public void shutdown() {
        this.quit = true;
        this.interrupt();
    }

    /**
     * Get current number of requests in the queue.
     *
     * @return queue length
     */
    public int getQueueLength() {
        return q.size();
    }

    /**
     * Main loop: takes operations off the queue until shut down. An interrupt
     * ends the loop cleanly; any other exception is treated as a stage crash
     * and reported via notifyCrashed().
     */
    @Override
    public void run() {

        notifyStarted();

        while (!quit) {
            Request rq = null;
            try {
                final StageMethod op = q.take();

                rq = op.getRq();

                if (Logging.tracingEnabled())
                    Logging.logMessage(Logging.LEVEL_DEBUG, this, "processing request #"
                        + rq.getRequestId() + " method: " + op.getStageMethod());

                processMethod(op);

            } catch (InterruptedException ex) {
                // interrupt is the regular shutdown signal
                break;
            } catch (Exception ex) {
                if (rq != null)
                    Logging.logMessage(Logging.LEVEL_DEBUG, this,
                        "exception occurred while processing:" + rq);
                Logging.logMessage(Logging.LEVEL_ERROR, this, ex);
                this.notifyCrashed(ex);
                break;
            }
        }

        notifyStopped();
    }

    /**
     * Operation was not successful, returns an error to the client.
     *
     * @param m
     *            the stage method whose execution failed
     * @param err
     *            the error details to send to the client
     */
    protected void methodExecutionFailed(StageMethod m, ErrorRecord err) {
        assert (m != null);
        // a null callback means the request was already finished elsewhere;
        // the event is dropped silently
        if (m.getCallback() == null) {
            if (Logging.tracingEnabled()) {
                Logging.logMessage(Logging.LEVEL_TRACE, this, "event dropped (possibly finished) "
                    + m.getRq().getRequestId() + " with error: " + err);
            }
            return;
        }
        assert (m.getRq() != null);
        assert (err != null);

        if (err.getErrorClass() == ErrorClass.INTERNAL_SERVER_ERROR)
            Logging.logMessage(Logging.LEVEL_ERROR, this, err.toString());

        m.getRq().setError(err);
        if (StatisticsStage.measure_request_times)
            calcRequestDuration(m.getRq());
        m.getCallback().methodExecutionCompleted(m.getRq(), StageResponseCode.FAILED);
    }

    /**
     * Stage operation was executed successfully; forwards the given response
     * code to the operation's callback.
     *
     * @param m
     *            the stage method that completed
     * @param code
     *            the response code to report
     */
    protected void methodExecutionSuccess(StageMethod m, StageResponseCode code) {
        assert (m != null);
        assert (m.getRq() != null);
        // a null callback means the request was already finished elsewhere
        if (m.getCallback() == null) {
            if (Logging.tracingEnabled()) {
                Logging.logMessage(Logging.LEVEL_TRACE, this, "event dropped (possibly finished) "
                    + m.getRq().getRequestId());
            }
            return;
        }
        if (StatisticsStage.measure_request_times)
            calcRequestDuration(m.getRq());
        m.getCallback().methodExecutionCompleted(m.getRq(), code);
    }

    /**
     * Records the queue-to-completion latency of a request in the statistics
     * counters.
     *
     * NOTE(review): the divisor 100000 converts nanoseconds to units of
     * 100 microseconds; if milliseconds were intended the divisor should be
     * 1000000 — confirm the intended unit.
     */
    protected void calcRequestDuration(Request rq) {
        long d = (System.nanoTime()-rq.getEnqueueNanos())/100000l;
        _numRq.incrementAndGet();
        if (_minRqTime.get() > d)
            _minRqTime.set((int)d);
        if (_maxRqTime.get() < d)
            _maxRqTime.set((int)d);
        _sumRqTime.addAndGet(d);
    }

    /**
     * Handles the actual execution of a stage method. Must be implemented by
     * all stages.
     *
     * @param method
     *            the stage method to execute
     */
    protected abstract void processMethod(StageMethod method);

    /**
     * Queue entry bundling a request with the stage method code to execute and
     * the callback to invoke on completion.
     */
    protected static final class StageMethod {
        private OSDRequest rq;

        private int stageMethod;

        private StageCallbackInterface callback;

        public StageMethod(OSDRequest rq, int stageMethod, StageCallbackInterface callback) {
            this.rq = rq;
            this.stageMethod = stageMethod;
            this.callback = callback;
        }

        public int getStageMethod() {
            return stageMethod;
        }

        public void setStageMethod(int stageMethod) {
            this.stageMethod = stageMethod;
        }

        public OSDRequest getRq() {
            return rq;
        }

        public void setRq(OSDRequest rq) {
            this.rq = rq;
        }

        public StageCallbackInterface getCallback() {
            return callback;
        }

        public void setCallback(StageCallbackInterface callback) {
            this.callback = callback;
        }
    }

}
/**
 * Callback invoked by a {@link Stage} when the processing of an enqueued
 * stage operation has completed, either successfully or with an error.
 */
public interface StageCallbackInterface {

    /**
     * Called once when the stage has finished processing an operation.
     *
     * @param request
     *            the request that was processed
     * @param result
     *            outcome code telling the operation's state machine which
     *            action to take next (e.g. OK, FAILED, FINISH)
     */
    public void methodExecutionCompleted(OSDRequest request, Stage.StageResponseCode result);

}
/**
 * Plain container for OSD runtime counters, shared between the stages that
 * update them and the statistics collector that samples them periodically.
 */
public class StageStatistics {

    // cumulative counters, updated concurrently from several stages
    public AtomicLong bytesTX, bytesRX, numGmaxReceived, numWrites, numReads,
        numGmaxRPCs, numTruncateRPCs;

    // NOTE(review): unlike the other counters this is a plain long and is
    // therefore not safe for concurrent updates — confirm it is only ever
    // modified from a single thread
    public long numDeletes;

    public StageStatistics() {
        bytesTX = new AtomicLong();
        bytesRX = new AtomicLong();
        numGmaxReceived = new AtomicLong();
        numWrites = new AtomicLong();
        numReads = new AtomicLong();
        numGmaxRPCs = new AtomicLong();
        numTruncateRPCs = new AtomicLong();
        numDeletes = 0;
    }

}
+ */ + +package org.xtreemfs.osd.stages; + +import java.lang.management.ManagementFactory; +import java.lang.management.OperatingSystemMXBean; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.logging.Level; +import java.util.logging.Logger; +import org.xtreemfs.common.RingBuffer; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.pinky.HTTPUtils.DATA_TYPE; +import org.xtreemfs.osd.ErrorRecord; +import org.xtreemfs.osd.OSDRequestDispatcher; +import org.xtreemfs.osd.RequestDispatcher.Stages; + +/** + * + * @author bjko + */ +public final class StatisticsStage extends Stage { + + public static final int STAGEOP_STATUS_PAGE = 1; + + public static final int STAGEOP_STATISTICS = 2; + + public static final int STAGEOP_MEASURE_RQT = 3; + + public static boolean measure_request_times; + + public static boolean collect_statistics; + + private final RingBuffer txRate; + private final RingBuffer rxRate; + private final RingBuffer cpu; + private final RingBuffer readRate; + private final RingBuffer writeRate; + private final RingBuffer memUsage; + private final RingBuffer[] stagesAvg; + private final RingBuffer[] stagesMin; + private final RingBuffer[] stagesMax; + + private final StageStatistics stats; + + private StatisticsCollector collector; + private final int statlength; + + private final Stages[] statStages; + + private final OSDRequestDispatcher master; + + public StatisticsStage(OSDRequestDispatcher master, StageStatistics stats, int statlength) { + super("StatStage"); + this.master = master; + this.statlength = statlength; + txRate = new RingBuffer(statlength,0l); + rxRate = new RingBuffer(statlength,0l); + cpu = new RingBuffer(statlength,0l); + readRate = new 
RingBuffer(statlength,0l); + writeRate = new RingBuffer(statlength,0l); + memUsage = new RingBuffer(statlength,0l); + + statStages = new Stages[]{Stages.PARSER, Stages.AUTH, Stages.STORAGE}; + + final int numStages = statStages.length; + stagesAvg = new RingBuffer[numStages]; + stagesMin = new RingBuffer[numStages]; + stagesMax = new RingBuffer[numStages]; + for (int i = 0; i < stagesAvg.length; i++) { + stagesAvg[i] = new RingBuffer(statlength,0l); + stagesMin[i] = new RingBuffer(statlength,0l); + stagesMax[i] = new RingBuffer(statlength,0l); + } + this.stats = stats; + + StatisticsStage.measure_request_times = master.getConfig().isMeasureRqsEnabled(); + StatisticsStage.collect_statistics = master.getConfig().isBasicStatsEnabled(); + if (StatisticsStage.collect_statistics) { + collector = new StatisticsCollector(stats); + } + } + + public void start() { + super.start(); + if (collector != null) + collector.start(); + } + + public void shutdown() { + if (collector != null) { + collector.quit = true; + collector.interrupt(); + } + super.shutdown(); + } + + + @Override + protected void processMethod(StageMethod method) { + if (method.getStageMethod() == STAGEOP_STATISTICS) { + processStageOpStatistics(method); + } else if (method.getStageMethod() == STAGEOP_MEASURE_RQT) { + processStageOpSettings(method); + + } + } + + private void processStageOpSettings(StageMethod method) { + final Boolean[] settings = (Boolean[]) method.getRq().getAttachment(); + final Boolean enableRqMeasurements = settings[0]; + final Boolean enableBasicStats = settings[1]; + if (enableRqMeasurements != null) { + StatisticsStage.measure_request_times = enableRqMeasurements.booleanValue(); + } + if (enableBasicStats != null) { + StatisticsStage.collect_statistics = enableBasicStats.booleanValue(); + if (enableBasicStats) { + if (collector == null) { + collector = new StatisticsCollector(stats); + collector.start(); + } + } else { + if (collector != null) { + collector.quit = true; + 
collector.interrupt(); + collector = null; + } + } + } + try { + List data = new ArrayList(2); + data.add(new Boolean(StatisticsStage.measure_request_times)); + data.add(new Boolean(StatisticsStage.collect_statistics)); + method.getRq().setData(ReusableBuffer.wrap(JSONParser.writeJSON(data).getBytes()), DATA_TYPE.JSON); + methodExecutionSuccess(method, StageResponseCode.FINISH); + } catch (JSONException ex) { + methodExecutionFailed(method, new ErrorRecord(ErrorRecord.ErrorClass.INTERNAL_SERVER_ERROR, ex.getMessage())); + } + } + + private void processStageOpStatistics(StageMethod method) { + Map data = new HashMap(); + + if (StatisticsStage.collect_statistics) { + List txSeries = new LinkedList(); + synchronized (txRate) { + Iterator iter = txRate.iterator(); + while (iter.hasNext()) { + txSeries.add(iter.next()); + } + } + data.put("TX", txSeries); + + List rxSeries = new LinkedList(); + synchronized (txRate) { + Iterator iter = rxRate.iterator(); + while (iter.hasNext()) { + rxSeries.add(iter.next()); + } + } + data.put("RX", rxSeries); + + List cpuSeries = new LinkedList(); + synchronized (txRate) { + Iterator iter = cpu.iterator(); + while (iter.hasNext()) { + cpuSeries.add(iter.next()); + } + } + data.put("CPU", cpuSeries); + + List readSeries = new LinkedList(); + synchronized (txRate) { + Iterator iter = readRate.iterator(); + while (iter.hasNext()) { + readSeries.add(iter.next()); + } + } + data.put("READ", readSeries); + + List writeSeries = new LinkedList(); + synchronized (txRate) { + Iterator iter = writeRate.iterator(); + while (iter.hasNext()) { + writeSeries.add(iter.next()); + } + } + data.put("WRITE", writeSeries); + + List memSeries = new LinkedList(); + synchronized (txRate) { + Iterator iter = memUsage.iterator(); + while (iter.hasNext()) { + memSeries.add(iter.next()); + } + } + data.put("MEM", memSeries); + + data.put("FREEDISK", ((OSDRequestDispatcher) master).getFreeSpace()); + + data.put("TOTALDISK", ((OSDRequestDispatcher) 
master).getTotalSpace()); + + data.put("USEDMEM", Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()); + + data.put("TOTALMEM", Runtime.getRuntime().maxMemory()); + } + + if (StatisticsStage.measure_request_times) { + + for (int st = 0; st < statStages.length; st++) { + final String stName = statStages[st].toString(); + List avgs = new LinkedList(); + synchronized (txRate) { + Iterator iter = stagesAvg[st].iterator(); + while (iter.hasNext()) { + avgs.add(iter.next()); + } + } + data.put(stName+"_AVG", avgs); + + List min = new LinkedList(); + synchronized (txRate) { + Iterator iter = stagesMin[st].iterator(); + while (iter.hasNext()) { + min.add(iter.next()); + } + } + data.put(stName+"_MIN", min); + + List max = new LinkedList(); + synchronized (txRate) { + Iterator iter = stagesMax[st].iterator(); + while (iter.hasNext()) { + max.add(iter.next()); + } + } + data.put(stName+"_MAX", max); + } + } + + + + try { + method.getRq().setData(ReusableBuffer.wrap(JSONParser.writeJSON(data).getBytes()), DATA_TYPE.JSON); + methodExecutionSuccess(method, StageResponseCode.FINISH); + } catch (JSONException ex) { + methodExecutionFailed(method, new ErrorRecord(ErrorRecord.ErrorClass.INTERNAL_SERVER_ERROR, ex.getMessage())); + } + } + + private final class StatisticsCollector extends Thread { + + public static final long INTEVRAL = 1000; + + private transient boolean quit; + + private final StageStatistics stats; + + public StatisticsCollector(StageStatistics stats) { + quit = false; + this.stats = stats; + } + + @Override + public void run() { + long lastTx = stats.bytesTX.get(); + long lastRx = stats.bytesRX.get(); + long lastRead = stats.numReads.get(); + long lastWrite = stats.numWrites.get(); + OperatingSystemMXBean osb = ManagementFactory.getOperatingSystemMXBean(); + Logging.logMessage(Logging.LEVEL_INFO, this,"statistics collector started"); + do { + try { + sleep(INTEVRAL); + + long newTx = stats.bytesTX.get(); + long tx = newTx - lastTx; + lastTx = 
newTx; + + long newRx = stats.bytesRX.get(); + long rx = newRx - lastRx; + lastRx = newRx; + + + long newRead = stats.numReads.get(); + long read = newRead - lastRead; + lastRead = newRead; + + long newWrite = stats.numWrites.get(); + long write = newWrite - lastWrite; + lastWrite = newWrite; + + synchronized (txRate) { + txRate.insert(tx); + rxRate.insert(rx); + cpu.insert(Long.valueOf((long)osb.getSystemLoadAverage()*1000)); + readRate.insert(read); + writeRate.insert(write); + memUsage.insert(Runtime.getRuntime().freeMemory()*100/Runtime.getRuntime().maxMemory()); + if (StatisticsStage.measure_request_times) { + //parserStage results + int i = 0; + for (Stages st : statStages) { + final Stage pStage = master.getStage(st); + long numRq = pStage._numRq.getAndSet(0); + long sumRq = pStage._sumRqTime.getAndSet(0); + long avgTime = (numRq > 0) ? sumRq/numRq : 0; + stagesAvg[i].insert(avgTime); + long maxTime = pStage._maxRqTime.getAndSet(0); + long minTime = pStage._minRqTime.getAndSet(Integer.MAX_VALUE); + stagesMax[i].insert(maxTime); + if (minTime < Integer.MAX_VALUE) + stagesMin[i].insert(minTime); + else + stagesMin[i].insert(Long.valueOf(0)); + i++; + } + } + } + } catch (InterruptedException ex) { + break; + } + + } while (!quit); + Logging.logMessage(Logging.LEVEL_INFO, this,"statistics collector stopped"); + } + + } + +} diff --git a/servers/src/org/xtreemfs/osd/stages/StorageStage.java b/servers/src/org/xtreemfs/osd/stages/StorageStage.java new file mode 100644 index 0000000000000000000000000000000000000000..b3d80c1fc6ad48f3bc25f4cf1f4696353e297e08 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/stages/StorageStage.java @@ -0,0 +1,118 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and + Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. 
+ The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Eugenio Cesario (CNR) + */ + +package org.xtreemfs.osd.stages; + +import java.io.IOException; + +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.storage.MetadataCache; +import org.xtreemfs.osd.storage.StorageLayout; +import org.xtreemfs.osd.storage.Striping; + +public class StorageStage extends Stage { + + private StorageThread[] storageThreads; + + /** Creates a new instance of MultithreadedStorageStage */ + public StorageStage(RequestDispatcher master, Striping striping, + MetadataCache cache, StorageLayout layout, int numOfThreads) + throws IOException { + + super("OSD Storage Stage"); + + int numberOfThreads = 5; + if (numOfThreads > 0) + numberOfThreads = numOfThreads; + + storageThreads = new StorageThread[numberOfThreads]; + for (int i = 0; i < numberOfThreads; i++) + storageThreads[i] = new StorageThread(i, master, striping, cache, + layout); + } + + public void enqueueOperation(OSDRequest rq, int method, + StageCallbackInterface callback) { + + rq.setEnqueueNanos(System.nanoTime()); + + // choose the thread the new request has to be + // assigned to, for its execution + int taskId = getTaskId(rq); + + // add the new 
request to the storageTask, + // in order to start/schedule its execution + // concurrently with other threads assigned to other + // storageTasks + storageThreads[taskId].enqueueOperation(rq, method, callback); + } + + @Override + protected void processMethod(StageMethod method) { + // empty, processing takes place in storage thread + } + + public void run() { + // start all storage threads + for (StorageThread th : storageThreads) + th.start(); + } + + public void shutdown() { + for (StorageThread th : storageThreads) + th.shutdown(); + } + + public void waitForStartup() throws Exception { + // wait for all storage threads to be ready + for (StorageThread th : storageThreads) + th.waitForStartup(); + } + + public void waitForShutdown() throws Exception { + // wait for all storage threads to be shut down + for (StorageThread th : storageThreads) + th.waitForShutdown(); + } + + private int getTaskId(OSDRequest rq) { + + // calculate a hash value from the file ID and return the responsible + // thread + String fileId = rq.getDetails().getFileId() + + rq.getDetails().getObjectNumber(); + int key = Math.abs(fileId.hashCode()); + int index = (key % storageThreads.length); + + // String objId = rq.getDetails().getFileId() + // + rq.getDetails().getObjectNumber(); + // int key = Math.abs(objId.hashCode()); + // int index = (key % storageThreads.length); + + return index; + } + +} diff --git a/servers/src/org/xtreemfs/osd/stages/StorageThread.java b/servers/src/org/xtreemfs/osd/stages/StorageThread.java new file mode 100644 index 0000000000000000000000000000000000000000..a7c0a51e9a37f3fa49fd390988b333a2b7cebdab --- /dev/null +++ b/servers/src/org/xtreemfs/osd/stages/StorageThread.java @@ -0,0 +1,1214 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and + Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. 
   The XtreemOS project has been developed with the financial support of the
   European Commission's IST program under contract #FP6-033576.

   XtreemFS is free software: you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation, either version 2 of the License, or (at your option)
   any later version.

   XtreemFS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
 */
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Eugenio Cesario (CNR), Christian Lorenz (ZIB), Felix Langner (ZIB)
 */

package org.xtreemfs.osd.stages;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.xtreemfs.common.auth.NullAuthProvider;
import org.xtreemfs.common.buffer.ASCIIString;
import org.xtreemfs.common.buffer.BufferPool;
import org.xtreemfs.common.buffer.ReusableBuffer;
import org.xtreemfs.common.clients.RPCClient;
import org.xtreemfs.common.clients.RPCResponse;
import org.xtreemfs.common.clients.RPCResponseListener;
import org.xtreemfs.common.clients.dir.DIRClient;
import org.xtreemfs.common.clients.mrc.MRCClient;
import org.xtreemfs.common.clients.osd.ConcurrentFileMap;
import org.xtreemfs.common.logging.Logging;
import org.xtreemfs.common.striping.StripeInfo;
import org.xtreemfs.common.striping.StripingPolicy;
import org.xtreemfs.common.uuids.ServiceUUID;
import org.xtreemfs.common.uuids.UnknownUUIDException;
import org.xtreemfs.foundation.json.JSONException;
import org.xtreemfs.foundation.json.JSONParser;
import org.xtreemfs.foundation.pinky.HTTPUtils;
import org.xtreemfs.foundation.pinky.PinkyRequest;
import org.xtreemfs.foundation.pinky.HTTPUtils.DATA_TYPE;
import org.xtreemfs.foundation.speedy.SpeedyRequest;
import org.xtreemfs.foundation.speedy.SpeedyResponseListener;
import org.xtreemfs.osd.ErrorRecord;
import org.xtreemfs.osd.OSDException;
import org.xtreemfs.osd.OSDRequest;
import org.xtreemfs.osd.RequestDetails;
import org.xtreemfs.osd.RequestDispatcher;
import org.xtreemfs.osd.ErrorRecord.ErrorClass;
import org.xtreemfs.osd.RequestDispatcher.Stages;
import org.xtreemfs.osd.storage.FileInfo;
import org.xtreemfs.osd.storage.MetadataCache;
import org.xtreemfs.osd.storage.StorageLayout;
import org.xtreemfs.osd.storage.Striping;
import org.xtreemfs.osd.storage.Striping.RPCMessage;
import org.xtreemfs.osd.storage.Striping.UDPMessage;

/**
 * Worker thread of the OSD storage stage. Executes object read, write,
 * truncate, gmax and clean-up operations; StorageStage dispatches all
 * requests for the same file/object to the same StorageThread instance,
 * which serializes them.
 */
public class StorageThread extends Stage {

    // operation codes dispatched in processMethod()

    public static final int STAGEOP_READ_OBJECT = 1;

    // second step of a striped read, after gmax responses have arrived
    public static final int STAGEOP_READ_OBJECT_GMAX_FETCHED = 2;

    public static final int STAGEOP_WRITE_OBJECT = 3;

    public static final int STAGEOP_FETCH_GMAX = 4;

    public static final int STAGEOP_PROCESS_GMAX_EVENT = 5;

    public static final int STAGEOP_TRUNCATE = 6;

    // second step of a striped truncate, after all remote OSDs acknowledged
    public static final int STAGEOP_TRUNCATE_ACKS_RECEIVED = 7;

    // truncate relayed from the head OSD to a remote OSD of the same replica
    public static final int STAGEOP_TRUNCATE_LOCAL = 8;

    public static final int STAGEOP_CLEAN_UP = 9;

    public static final int STAGEOP_CLEAN_UP2 = 10;

    // per-file metadata cache, shared with the other storage threads
    private MetadataCache cache;

    // on-disk object storage layout
    private StorageLayout layout;

    // striping logic (gmax/truncate message creation, response processing)
    private Striping striping;

    // OSD core, used for sending RPCs/UDP messages and for statistics
    private RequestDispatcher master;

    /**
     * Creates a storage worker thread.
     *
     * @param id index of this thread within the storage stage's pool (used
     *            only for the thread name)
     * @param dispatcher the OSD request dispatcher
     * @param striping striping logic shared by all workers
     * @param cache metadata cache shared by all workers
     * @param layout storage layout shared by all workers
     */
    public StorageThread(int id, RequestDispatcher dispatcher, Striping striping,
        MetadataCache cache, StorageLayout layout) {

        super("OSD Storage Thread " + id);

        this.cache = cache;
        this.layout = layout;
        this.master = dispatcher;
        this.striping = striping;
    }

    /**
     * Dispatches a dequeued operation to the matching process* method and
     * reports success/failure via the stage callback.
     * (NOTE: the method body continues on the next patch line.)
     */
    @Override
    protected void processMethod(StageMethod method) {

        try
{

            switch (method.getStageMethod()) {
            case STAGEOP_READ_OBJECT:
                // processRead returns false when it must first fetch gmax
                // from remote OSDs; the callback then fires later via
                // STAGEOP_READ_OBJECT_GMAX_FETCHED
                if (processRead(method))
                    methodExecutionSuccess(method, StageResponseCode.OK);
                break;
            case STAGEOP_READ_OBJECT_GMAX_FETCHED:
                processReadGmaxFetched(method);
                methodExecutionSuccess(method, StageResponseCode.OK);
                break;
            case STAGEOP_WRITE_OBJECT:
                processWrite(method);
                methodExecutionSuccess(method, StageResponseCode.OK);
                break;
            case STAGEOP_FETCH_GMAX:
                processFetchGmax(method);
                methodExecutionSuccess(method, StageResponseCode.OK);
                break;
            case STAGEOP_PROCESS_GMAX_EVENT:
                processGmaxEvent(method);
                methodExecutionSuccess(method, StageResponseCode.OK);
                break;
            case STAGEOP_TRUNCATE:
                // false = remote truncate requests are pending; completion is
                // signalled via STAGEOP_TRUNCATE_ACKS_RECEIVED
                if (processTruncate(method, false))
                    methodExecutionSuccess(method, StageResponseCode.OK);
                break;
            case STAGEOP_TRUNCATE_ACKS_RECEIVED:
                processTruncateAcksReceived(method);
                methodExecutionSuccess(method, StageResponseCode.OK);
                break;
            case STAGEOP_TRUNCATE_LOCAL:
                // intra-OSD truncate never relays further, so the return
                // value is deliberately ignored here
                processTruncate(method, true);
                methodExecutionSuccess(method, StageResponseCode.OK);
                break;
            case STAGEOP_CLEAN_UP:
                // clean-up signals success/finish itself via callbacks
                processCleanUp1(method);
                break;
            case STAGEOP_CLEAN_UP2:
                processCleanUp2(method);
                break;
            }

        } catch (OSDException exc) {
            methodExecutionFailed(method, exc.getErrorRecord());
        } catch (Throwable th) {
            methodExecutionFailed(method, new ErrorRecord(
                ErrorRecord.ErrorClass.INTERNAL_SERVER_ERROR, "an internal error has occurred", th));
        }
    }

    /**
     * Reads an object (or a byte range of it) from the local storage layout.
     *
     * @param rq the stage method wrapping the OSD request
     * @return true if the response is complete; false if gmax had to be
     *         requested from remote OSDs first (striped case), in which case
     *         processing resumes in processReadGmaxFetched()
     * @throws IOException if the storage layout fails
     * @throws JSONException if building the gmax request fails
     */
    private boolean processRead(StageMethod rq) throws IOException, JSONException {

        final RequestDetails details = rq.getRq().getDetails();

        final String fileId = details.getFileId();
        final long objNo = details.getObjectNumber();
        final StripingPolicy sp = details.getCurrentReplica().getStripingPolicy();
        final long stripeSize = sp.getStripeSize(objNo);
        final long[] range = { details.getByteRangeStart(), details.getByteRangeEnd() };
        final FileInfo fi = layout.getFileInfo(sp, fileId);

        if (Logging.isDebug())
            Logging.logMessage(Logging.LEVEL_DEBUG, this, "READ: " + fileId + "-" + objNo + ".");

        int objVer = fi.getObjectVersion(objNo);
        if (Logging.isDebug())
            Logging.logMessage(Logging.LEVEL_DEBUG, this, "getting objVer " + objVer);

        String objChksm = fi.getObjectChecksum(objNo);
        if (Logging.isDebug())
            Logging.logMessage(Logging.LEVEL_DEBUG, this, "checksum is " + objChksm);

        // retrieve the object from the storage layout
        ReusableBuffer data = layout.readObject(fileId, objNo, objVer, objChksm, sp, sp.getOSDByObject(objNo));

        // test the checksum; a mismatch is flagged on the request but the
        // (possibly corrupt) data is still processed
        if (!layout.checkObject(data, objChksm)) {
            Logging.logMessage(Logging.LEVEL_WARN, this, "invalid checksum: file=" + fileId
                + ", objNo=" + objNo);
            details.setInvalidChecksum(true);
        }

        // object exists locally ...
        if (data.capacity() > 0) {

            if (Logging.tracingEnabled())
                Logging.logMessage(Logging.LEVEL_TRACE, this, fileId + "-" + objNo
                    + " found locally");

            // check whether the object is complete
            if (data.capacity() < stripeSize) {
                // object incomplete

                if (Logging.tracingEnabled())
                    Logging.logMessage(Logging.LEVEL_TRACE, this, fileId + "-" + objNo
                        + " incomplete");

                if (objNo < fi.getLastObjectNumber()) {
                    // object known to be incomplete: fill object with
                    // padding zeros
                    data = padWithZeros(data, (int) sp.getStripeSize(objNo));
                    if (details.isRangeRequested())
                        data.range((int) range[0], (int) (range[1] - range[0]) + 1);

                    setReadResponse(rq, data);
                    return true;
                }

                else {
                    // not sure in striped case whether object is complete
                    // or not

                    // if the read does not go beyond the object size,
                    // return the data immediately
                    if (details.isRangeRequested() && data.capacity() >= range[1]) {
                        data.range((int) range[0], (int) (range[1] - range[0]) + 1);
                        setReadResponse(rq, data);
                        return true;
                    }

                    // otherwise, fetch globalMax if necessary
                    else {

                        // return the data in the non-striped case
                        if (sp.getWidth() == 1) {

                            if (details.isRangeRequested()) {
                                final int rangeSize = (int) (range[1] - range[0]) + 1;
                                if (data.capacity() >= rangeSize)
                                    data.range((int) range[0], rangeSize);
                                else {
                                    // read beyond EOF: deliver what exists
                                    if (Logging.isDebug())
                                        Logging.logMessage(Logging.LEVEL_DEBUG, this,
                                            "read beyond EOF: " + rq.getRq().getPinkyRequest());
                                    final int eofLength = data.capacity() - ((int) range[0]);
                                    data.range((int) range[0], eofLength);
                                }
                            }

                            setReadResponse(rq, data);
                            return true;
                        }

                        // fetch globalMax otherwise
                        else {
                            rq.getRq().setData(data, HTTPUtils.DATA_TYPE.BINARY);
                            sendGmaxRequests(rq);
                            return false;
                        }
                    }
                }

            } else {
                // object is complete

                if (Logging.tracingEnabled())
                    Logging.logMessage(Logging.LEVEL_TRACE, this, fileId + "-" + objNo
                        + " complete");

                if (details.isRangeRequested())
                    data.range((int) range[0], (int) (range[1] - range[0]) + 1);

                setReadResponse(rq, data);
                return true;
            }

        }

        // object does not exist locally
        else {
            // check if the object is a 'hole' or an EOF

            if (Logging.tracingEnabled())
                Logging.logMessage(Logging.LEVEL_TRACE, this, fileId + "-" + objNo
                    + " not found locally");

            if (objNo < fi.getLastObjectNumber()) {
                // hole
                data = padWithZeros(data, (int) stripeSize);
                if (details.isRangeRequested())
                    data.range((int) range[0], (int) (range[1] - range[0]) + 1);

                setReadResponse(rq, data);
                return true;

            } else {

                // in the non-striped case, it must be an EOF
                if (sp.getWidth() == 1) {
                    setReadResponse(rq, data);
                    return true;
                }

                // in the striped case, it may be either a hole or an EOF
                // fetch globalMax
                else {
                    rq.getRq().setData(data, HTTPUtils.DATA_TYPE.BINARY);
                    sendGmaxRequests(rq);
                    return false;
                }
            }

        }

    }

    /**
     * Continuation of a striped read after the gmax responses from the other
     * OSDs of the replica have arrived; decides between data, zero padding
     * and EOF. (NOTE: the method body continues on the next patch line.)
     *
     * @param rq the stage method wrapping the OSD request
     * @throws IOException if the storage layout fails
     */
    public void processReadGmaxFetched(StageMethod rq) throws IOException {

        // update globalMax from all "globalMax" responses
        striping.processGmaxResponses(rq.getRq());

        final RequestDetails details = rq.getRq().getDetails();
        final String fileId =
details.getFileId(); + final long objNo = details.getObjectNumber(); + final StripingPolicy sp = details.getCurrentReplica().getStripingPolicy(); + final ReusableBuffer data = rq.getRq().getData(); + final FileInfo fi = layout.getFileInfo(sp, fileId); + + // object exists: padding or no padding + if (data.capacity() > 0) { + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "object " + objNo + + " exists locally..."); + + // if last object, send partial object + if (objNo == fi.getLastObjectNumber()) { + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "object " + objNo + + " is the last object"); + + setReadResponse(rq, data); + } + + // if not last object, send padded object + else { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "object " + objNo + + " not the last object"); + + rq.getRq().setData(padWithZeros(data, (int) sp.getStripeSize(objNo)), + HTTPUtils.DATA_TYPE.BINARY); + + if (details.isRangeRequested()) + data.range((int) details.getByteRangeStart(), + (int) (details.getByteRangeEnd() - details.getByteRangeStart()) + 1); + + setReadResponse(rq, data); + } + + } + + // object does not exist: padding or EOF + else { + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "object " + objNo + + " does not exist locally"); + + // if hole, send padding object + if (objNo <= fi.getLastObjectNumber()) { + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "object " + objNo + + " is a hole, return zero-padded object"); + + ReusableBuffer paddedData = padWithZeros(data, (int) sp.getStripeSize(objNo)); + long bytes = paddedData.capacity(); + setReadResponse(rq, paddedData); + master.getStatistics().bytesTX.addAndGet(bytes); + } + + // otherwise, send EOF + else { + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "object " + objNo + + " is out of range, return null object (EOF)"); + + setReadResponse(rq, data); + } + + } + } + + private void processWrite(StageMethod rq) throws 
IOException { + + final RequestDetails details = rq.getRq().getDetails(); + + final String fileId = details.getFileId(); + final StripingPolicy sp = details.getCurrentReplica().getStripingPolicy(); + final long objNo = details.getObjectNumber(); + final FileInfo fi = layout.getFileInfo(sp, fileId); + final String checksum = fi.getObjectChecksum(objNo); + final PinkyRequest pr = rq.getRq().getPinkyRequest(); + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "WRITE: " + fileId + "-" + objNo + "." + + " last objNo=" + fi.getLastObjectNumber()); + + + + try { + + // determine obj version to write + int currentV = Math.max(fi.getObjectVersion(objNo), details + .isObjectVersionNumberRequested() ? details.getObjectVersionNumber() : 0); + if (currentV == 0) + currentV++; + int nextV = currentV; + int offset = 0; + + if (details.isRangeRequested()) + offset = (int) details.getByteRangeStart(); + + ReusableBuffer writeData = pr.requestBody; + assert(writeData != null); + if (StatisticsStage.collect_statistics) { + master.getStatistics().bytesRX.addAndGet(writeData.capacity()); + master.getStatistics().numWrites.incrementAndGet(); + } + + if (details.getCowPolicy().isCOW((int)objNo)) { + nextV++; + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "incremented version: " + fileId + + "-" + objNo + "." 
+ nextV); + // increment version number and copy old object, if only a range + // is written + // otherwise simply write data to new object version + if (details.isRangeRequested()) { + + // load old object and overwrite with range + ReusableBuffer oldObj = layout.readObject(fileId, objNo, currentV, checksum, + sp, 0l); + + // test the checksum + if (!layout.checkObject(oldObj, checksum)) { + Logging.logMessage(Logging.LEVEL_WARN, this, "invalid checksum: file=" + + fileId + ", objNo=" + objNo); + BufferPool.free(oldObj); + throw new OSDException(ErrorClass.INTERNAL_SERVER_ERROR, "invalid checksum"); + } + + // if the old objct does not have sufficient capacity, + // enlarge it + if (oldObj.capacity() < details.getByteRangeEnd() + 1) { + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "extend object buffer and copy"); + + // allocate a new buffer and copy the old object into + // the buffer + ReusableBuffer tmp = BufferPool + .allocate((int) details.getByteRangeEnd() + 1); + writeData.position(0); + oldObj.position(0); + tmp.put(oldObj); + + // pad the space between the old object and the written + // byte range with zeros + while (tmp.position() < details.getByteRangeStart()) + tmp.put((byte) 0); + tmp.position((int) details.getByteRangeStart()); + + tmp.put(writeData); + BufferPool.free(oldObj); + writeData = tmp; + } else { + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "overwrite old data"); + oldObj.position((int) details.getByteRangeStart()); + writeData.position(0); + oldObj.put(writeData); + writeData = oldObj; + writeData.position(0); + } + // we generated the whole object... 
+ offset = 0; + } + details.getCowPolicy().objectChanged((int)objNo); + } + + if (details.isObjectVersionNumberRequested()) { + // update push with version number + assert (details.getObjectVersionNumber() >= currentV) : "local version no: " + + currentV + ", latest version no:" + details.getObjectVersionNumber() + + ", current version no:" + fi.getObjectVersion(objNo) + ", req=" + + rq.getRq().getPinkyRequest(); + nextV = details.getObjectVersionNumber(); + } + + writeData.position(0); + layout.writeObject(fileId, objNo, writeData, nextV, offset, checksum, sp, sp.getOSDByObject(objNo)); + String newChecksum = layout.createChecksum(fileId, objNo, writeData.capacity() == sp + .getStripeSize(objNo) ? writeData : null, nextV, checksum); + + // if a new buffer had to be allocated for writing the object, free + // it now (the request body will be freed automatically) + if (writeData != pr.requestBody) + BufferPool.free(writeData); + + fi.getObjVersions().put(objNo, nextV); + fi.getObjChecksums().put(objNo, newChecksum); + + details.setObjectVersionNumber(nextV); + + // if the write refers to the last known object or to an object + // beyond, i.e. the file size and globalMax are potentially + // affected: + if (objNo >= fi.getLastObjectNumber()) { + + long newObjSize = pr.requestBody.capacity(); + if (details.isRangeRequested()) + newObjSize += details.getByteRangeStart(); + + // calculate new filesize... 
+ long newFS = 0; + if (objNo > 0) { + newFS = sp.getLastByte(objNo - 1) + 1 + newObjSize; + } else { + newFS = newObjSize; + } + + // check whether the file size might have changed; in this case, + // ensure that the X-New-Filesize header will be set + if (newFS > fi.getFilesize() && objNo >= fi.getLastObjectNumber()) { + // Metadata meta = info.getMetadata(); + // meta.putKnownSize(newFS); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "new filesize: " + newFS); + details.setNewFSandEpoch(JSONParser.toJSON(new Object[] { newFS, + fi.getTruncateEpoch() })); + } else { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "no new filesize: " + newFS + "/" + + fi.getFilesize() + ", " + fi.getLastObjectNumber() + "/" + objNo); + } + + // update file size and last object number + fi.setFilesize(newFS); + fi.setLastObjectNumber(objNo); + + // if the written object has a larger ID than the largest + // locally-known object of the file, send 'globalMax' messages + // to all other OSDs and update local globalMax + if (objNo > fi.getLastObjectNumber()) { + + fi.setLastObjectNumber(objNo); + + List msgs = striping.createGmaxMessages(new ASCIIString(fileId), + newFS, objNo, fi.getTruncateEpoch(), details.getCurrentReplica()); + + for (UDPMessage msg : msgs) + master.sendUDP(msg.buf.createViewBuffer(), msg.addr); + + // one buffer has been allocated, which will not be freed + // automatically; this has to be done here + BufferPool.free(msgs.get(0).buf); + } + + } + + } catch (IOException ex) { + ex.printStackTrace(); + throw ex; + } catch (JSONException ex) { + throw new IOException(ex); + } + } + + private boolean processTruncate(StageMethod rq, boolean intraOSD) throws IOException, + JSONException { + + final RequestDetails details = rq.getRq().getDetails(); + + // for the sake of robustness, check if contacted OSD is head OSD if the + // truncate operation is not local + if (!intraOSD && !master.isHeadOSD(details.getCurrentReplica())) + throw new 
OSDException(ErrorClass.REDIRECT, details.getCurrentReplica().getOSDs() + .get(0).toString()); + + final String fileId = details.getFileId(); + final long fileSize = details.getTruncateFileSize(); + final StripingPolicy sp = details.getCurrentReplica().getStripingPolicy(); + final long epochNumber = details.getCapability().getEpochNo(); + + final FileInfo fi = layout.getFileInfo(sp, fileId); + + if (fi.getTruncateEpoch() >= epochNumber) + throw new OSDException(ErrorClass.USER_EXCEPTION, "invalid truncate epoch for file " + + fileId + ": " + epochNumber + ", current one is " + fi.getTruncateEpoch()); + + // find the offset of the local OSD in the current replica's locations + // list + // FIXME: unify OSD IDs + final int relativeOSDNumber = details.getCurrentReplica().indexOf( + master.getConfig().getUUID()) + 1; + + long newLastObject = -1; + + if (fileSize == 0) { + // truncate to zero: remove all objects + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "truncate to 0"); + layout.deleteAllObjects(fileId); + fi.getObjChecksums().clear(); + fi.getObjVersions().clear(); + } + + else if (fi.getFilesize() > fileSize) { + // shrink file + newLastObject = truncateShrink(fileId, fileSize, epochNumber, sp, fi, relativeOSDNumber); + } + + else if (fi.getFilesize() < fileSize) { + // extend file + newLastObject = truncateExtend(fileId, fileSize, epochNumber, sp, fi, relativeOSDNumber); + } + + else if (fi.getFilesize() == fileSize) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "truncate to local size: " + fileSize); + newLastObject = fi.getLastObjectNumber(); + } + + // set the new file size and last object number + fi.setFilesize(fileSize); + fi.setLastObjectNumber(newLastObject); + fi.setTruncateEpoch(epochNumber); + + // store the truncate epoch persistently + layout.setTruncateEpoch(fileId, epochNumber); + + // append the new file size and epoch number to the response + details.setNewFSandEpoch(JSONParser.toJSON(new Object[] { fileSize, 
epochNumber })); + + // relay the truncate operation to all remote OSDs + if (!intraOSD && details.getCurrentReplica().getWidth() > 1) { + sendTruncateRequests(rq, newLastObject); + return false; + } + + return true; + } + + private void processTruncateAcksReceived(StageMethod rq) throws IOException { + + for (SpeedyRequest sr : rq.getRq().getHttpRequests()) { + + // check for exception + if (sr.statusCode != HTTPUtils.SC_OKAY) { + IOException exc = new IOException("truncate on remote OSD failed: " + sr.statusCode + + " (" + new String(sr.getResponseBody()) + ")"); + Logging.logMessage(Logging.LEVEL_ERROR, this, "error " + exc); + throw exc; + } + + sr.freeBuffer(); + } + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "truncate successful on all " + + rq.getRq().getHttpRequests().length + " OSDs"); + } + + // private boolean processFetchVersionNumber(StageMethod rq) + // throws IOException, JSONException { + // + // String fileId = rq.getFileId(); + // StripingPolicy sp = rq.getPolicy(); + // long objId = rq.getObjectNo(); + // + // FileInfo info = getFileInfo(sp, fileId); + // + // int vNum = info.getObjectVersion(objId); + // + // byte[] data = JSONParser.writeJSON(vNum).getBytes(HTTPUtils.ENC_UTF8); + // + // rq.setData(ReusableBuffer.wrap(data), HTTPUtils.DATA_TYPE.JSON); + // + // Logging.logMessage(Logging.LEVEL_DEBUG, this, "fetchVersionNumber rq: " + // + fileId + "-" + objId + "." + vNum); + // return true; + // }// processFetchVersionNumber + // + // private boolean processClose(StageMethod rq) { + // // this does not work since we do not have client reference counts for + // // files! 
+ // + // // oft.close(rq.getFileId()); + // // cache.fileClosedEvent(rq.getFileId()); + // // striping.closeFile(rq); + // + // return true; + // }// processClose + + private void processFetchGmax(StageMethod rq) throws IOException { + + final String fileId = rq.getRq().getDetails().getCapability().getFileId(); + final StripingPolicy sp = rq.getRq().getDetails().getCurrentReplica().getStripingPolicy(); + final FileInfo fi = layout.getFileInfo(sp, fileId); + + final List list = new ArrayList(3); + list.add(fi.getTruncateEpoch()); + list.add(fi.getLastObjectNumber()); + list.add(fi.getFilesize()); + + try { + rq.getRq().setData( + ReusableBuffer.wrap(JSONParser.writeJSON(list).getBytes(HTTPUtils.ENC_UTF8)), + HTTPUtils.DATA_TYPE.JSON); + + } catch (JSONException ex) { + throw new RuntimeException("There was a problem with the JSONParser :" + + ex.getMessage()); + } + + if (StatisticsStage.collect_statistics) { + master.getStatistics().numGmaxReceived.incrementAndGet(); + } + } + + private void processGmaxEvent(StageMethod req) throws IOException { + striping.processGmaxMessage(req.getRq(), cache); + } + + /** + * Checks the complete file tree on the OSD for zombies. 
+ * + * @param rq + * @throws IOException + * @throws JSONException + */ + private void processCleanUp1(StageMethod rq) throws IOException,JSONException{ + final ConcurrentFileMap fileList = layout.getAllFiles(); + Logging.logMessage(Logging.LEVEL_TRACE, this, "CleanUp: all files listed!"); + + final StorageThread thisStage = this; + final StageMethod req = rq; + + String authString = NullAuthProvider.createAuthString(master.getConfig().getUUID().toString(), master.getConfig().getUUID().toString()); + + // get the volume-locations from the directory service (DIR) + RPCResponse>> dirResponse = null; + + // for counting the answers + Set volumeIDs = fileList.unresolvedVolumeIDSet(); + final int volumesToRequest = volumeIDs.size(); + rq.getRq().setAttachment(0L); + + if (volumesToRequest == 0){ + rq.getRq().setData(null,DATA_TYPE.JSON); + methodExecutionSuccess(rq, StageResponseCode.FINISH); + } + + for(final String volumeID: volumeIDs){ + try{ + // ask the DIR for the UUID + dirResponse = master.getDIRClient().getEntities(RPCClient.generateMap("uuid", volumeID), + DIRClient.generateStringList("mrc"), + authString); + + // get the responses asynchronous + dirResponse.setResponseListener(new RPCResponseListener() { + @Override + public void responseAvailable(RPCResponse response) { + try{ + long count = (Long) req.getRq().getAttachment(); + count++; + req.getRq().setAttachment(count); + + ServiceUUID uuidService; + + Map> answer = (Map>) response.get(); + if (answer==null) throw new IOException("Answer of the request was 'null'"); + + // volume is not registered at the DIR + if (answer.get(volumeID) == null || answer.get(volumeID).get("mrc") == null){ + + // mark all files of that volume as zombies + fileList.saveAddress(volumeID, null); + }else{ + InetSocketAddress address = null; + // parse answer + try{ + URL url = new URL(((String) answer.get(volumeID).get("mrc"))); + address = new InetSocketAddress(url.getHost(),url.getPort()); + }catch (MalformedURLException mf){ 
+ // resolve the UUID + uuidService = new ServiceUUID(((String) answer.get(volumeID).get("mrc"))); + uuidService.resolve(); + address = uuidService.getAddress(); + } + + // save the address + fileList.saveAddress(volumeID,address); + } + + // check if all responses have been received + if (volumesToRequest == count){ + Logging.logMessage(Logging.LEVEL_TRACE, this, "CleanUp: all volumes identified!"); + req.getRq().setAttachment(fileList); + thisStage.enqueueOperation(req.getRq(), STAGEOP_CLEAN_UP2, req.getCallback()); + } + }catch(UnknownUUIDException ue){ + Logging.logMessage(Logging.LEVEL_ERROR, this, "UUID could not be resolved for: '"+volumeID+"': "+ue.getMessage()); + }catch(JSONException je){ + Logging.logMessage(Logging.LEVEL_ERROR, this, "JSON Parser could not get response: "+je.getMessage()); + }catch (IOException io) { + Logging.logMessage(Logging.LEVEL_ERROR, this, "Parser could not get response: "+io.getMessage()); + }catch (InterruptedException ie){ + Logging.logMessage(Logging.LEVEL_WARN, this, "CleanUp was interrupted: "+ie.getMessage()); + }finally{ + response.freeBuffers(); + } + } + }); + }catch (InterruptedException ie) { + throw new IOException("DIRClient was interrupted while working on a CleanUp request."); + } + } + } + + /** + * Checks the complete file tree on the OSD for zombies. 
+ * + * @param rq + * @throws IOException + * @throws JSONException + */ + private void processCleanUp2(StageMethod rq) throws IOException,JSONException{ + final List noZombieSize = new LinkedList(); + + final ConcurrentFileMap fileList = (ConcurrentFileMap) rq.getRq().getAttachment(); + final StageMethod req = rq; + + // check the files at the metaData & replica catalog (MRC) + RPCResponse mrcResponse = null; + String authString = NullAuthProvider.createAuthString(master.getConfig().getUUID().toString(), master.getConfig().getUUID().toString()); + List fileIDs; + + // for counting the answers + Set volumeIDs = fileList.volumeIDSetForRequest(); + final int volumesToRequest = volumeIDs.size(); + rq.getRq().setAttachment(0L); + + + for(final String volumeID: volumeIDs){ + fileIDs = fileList.getFileNumbers(volumeID); + + // ask the MRC whether the files in the list exist, or not + mrcResponse = new MRCClient(master.getDIRClient().getSpeedy(),60000) + .checkFileList(fileList.getAddress(volumeID), + volumeID, + fileIDs, + authString); + + mrcResponse.setAttachment(fileIDs); + + mrcResponse.setResponseListener(new RPCResponseListener() { + @Override + public void responseAvailable(RPCResponse response) { + List fileIDs = (List) response.getAttachment(); + + try{ + long count = (Long) req.getRq().getAttachment(); + count++; + req.getRq().setAttachment(count); + + // analyze the answer of the MRC + String resp = (String) response.get(); + + for (int i=0;i= lastRow; r--) { + final long rowObj = r * sp.getWidth() + relOsdId - 1; + if (rowObj == newLastObject) { + // is local and needs to be shrunk + final long newObjSize = fileSize - sp.getFirstByte(newLastObject); + truncateObject(fileId, newLastObject, sp, newObjSize, relOsdId); + } else if (rowObj > newLastObject) { + // delete objects + final int v = fi.getObjectVersion(rowObj); + layout.deleteObject(fileId, rowObj, v); + fi.deleteObject(rowObj); + } + } + + // make sure that last objects exist + for (long obj = 
newLastObject - 1; obj > newLastObject - sp.getWidth(); obj--) {
            if (obj > 0 && sp.isLocalObject(obj, relOsdId)) {
                int v = fi.getObjectVersion(obj);
                if (v == 0) {
                    // does not exist locally yet: create a zero-padding object
                    createPaddingObject(fileId, obj, sp, 1, sp.getStripeSize(obj), fi);
                }
            }
        }

        return newLastObject;
    }

    /**
     * Extends the file to the given size: enlarges (or zero-pads) the old
     * last object, creates the new last object and ensures that the objects
     * in between exist as padding objects on this OSD.
     *
     * @param fileId file to extend
     * @param fileSize new file size in bytes (larger than the current size)
     * @param epoch truncate epoch of this operation (currently unused here)
     * @param sp striping policy of the current replica
     * @param fi cached per-file metadata
     * @param relOsdId 1-based position of this OSD in the replica
     * @return the number of the new last object of the file
     * @throws IOException if the storage layout fails
     */
    private long truncateExtend(String fileId, long fileSize, long epoch, StripingPolicy sp,
        FileInfo fi, long relOsdId) throws IOException {

        // first find out which is the new "last object"
        final long newLastObject = sp.calculateLastObject(fileSize);
        final long oldLastObject = fi.getLastObjectNumber();
        assert (newLastObject >= oldLastObject);

        Logging.logMessage(Logging.LEVEL_DEBUG, this, "truncate extend to: " + fileSize
            + " old last: " + oldLastObject + " new last: " + newLastObject);

        if (sp.isLocalObject(newLastObject, relOsdId) && newLastObject == oldLastObject) {
            // simply extend the old one
            truncateObject(fileId, newLastObject, sp, fileSize - sp.getFirstByte(newLastObject),
                relOsdId);
        } else {
            // pad the old last object up to a full stripe, if it is local
            if ((oldLastObject > -1) && (sp.isLocalObject(oldLastObject, relOsdId))) {
                truncateObject(fileId, oldLastObject, sp, sp.getStripeSize(oldLastObject), relOsdId);
            }

            // create padding objects
            if (sp.isLocalObject(newLastObject, relOsdId)) {
                long objSize = fileSize - sp.getFirstByte(newLastObject);
                createPaddingObject(fileId, newLastObject, sp, 1, objSize, fi);
            }

            // make sure that last objects exist
            for (long obj = newLastObject - 1; obj > newLastObject - sp.getWidth(); obj--) {
                if (obj > 0 && sp.isLocalObject(obj, relOsdId)) {
                    int v = fi.getObjectVersion(obj);
                    if (v == 0) {
                        // does not exist
                        createPaddingObject(fileId, obj, sp, 1, sp.getStripeSize(obj), fi);
                    }
                }
            }
        }

        return newLastObject;
    }

    /**
     * Resizes a single local object to the given size: shrinks it or extends
     * it with zero bytes, writing a new object version and updating the
     * cached version/checksum maps.
     *
     * @param fileId file the object belongs to
     * @param objNo number of the object to resize
     * @param sp striping policy of the current replica
     * @param newSize new object size in bytes; must be in (0, stripeSize]
     * @param relOsdId 1-based position of this OSD in the replica
     * @throws IOException if the storage layout fails or the on-disk object
     *             has an invalid checksum
     */
    private void truncateObject(String fileId, long objNo, StripingPolicy sp, long newSize,
        long relOsdId) throws IOException {

        assert (newSize > 0) : "new size is " + newSize + " but should be > 0";
        assert (newSize <= sp.getStripeSize(objNo));
        assert (objNo >= 0) : "objNo is " + objNo;
        assert (sp.isLocalObject(objNo, relOsdId));

        final FileInfo fi = layout.getFileInfo(sp, fileId);
        final int version = fi.getObjectVersion(objNo);
        final String checksum = fi.getObjectChecksum(objNo);

        // NOTE(review): the OSD-number argument is 0 here, while the
        // read/write paths pass sp.getOSDByObject(objNo) -- confirm this is
        // intentional for the layout implementation in use
        ReusableBuffer oldData = layout.readObject(fileId, objNo, version, checksum, sp, 0);

        // test the checksum
        if (!layout.checkObject(oldData, checksum)) {
            Logging.logMessage(Logging.LEVEL_WARN, this, "invalid checksum: file=" + fileId
                + ", objNo=" + objNo);
            BufferPool.free(oldData);
            throw new OSDException(ErrorClass.INTERNAL_SERVER_ERROR, "invalid checksum");
        }

        // no extension necessary when size is correct
        if (oldData.capacity() == newSize) {
            Logging.logMessage(Logging.LEVEL_DEBUG, this, "truncate not necessary, object " + objNo
                + " is " + newSize);
            BufferPool.free(oldData);
            return;
        }

        Logging.logMessage(Logging.LEVEL_DEBUG, this, "truncate object " + objNo + " to "
            + newSize);

        ReusableBuffer newData = BufferPool.allocate((int) newSize);
        if (newSize < oldData.capacity()) {
            oldData.shrink((int) newSize);
        }
        newData.put(oldData);
        BufferPool.free(oldData);

        // fill the remaining buffer with zeros
        while (newData.position() < newData.capacity())
            newData.put((byte) 0);

        layout.writeObject(fileId, objNo, newData, version + 1, 0, checksum, sp, 0);
        String newChecksum = layout.createChecksum(fileId, objNo, newData, version + 1, checksum);

        BufferPool.free(newData);

        fi.getObjVersions().put(objNo, version + 1);
        fi.getObjChecksums().put(objNo, newChecksum);
    }

    /**
     * Creates a zero-padding object on disk and records its version and
     * checksum in the cached file metadata.
     *
     * @param fileId file the object belongs to
     * @param objNo number of the padding object
     * @param sp striping policy of the current replica
     * @param version object version to assign
     * @param size object size in bytes
     * @param fi cached per-file metadata to update
     * @throws IOException if the storage layout fails
     */
    private void createPaddingObject(String fileId, long objNo, StripingPolicy sp, int version,
        long size, FileInfo fi) throws IOException {
        String checksum = layout.createPaddingObject(fileId, objNo, sp, version, size);
        fi.getObjVersions().put(objNo, version);
        fi.getObjChecksums().put(objNo, checksum);
    }

    /**
     * Sends gmax requests to all remote OSDs of the current replica; once all
     * responses have arrived, the read is re-enqueued as
     * STAGEOP_READ_OBJECT_GMAX_FETCHED.
     * (NOTE: the signature continues on the next patch line.)
     */
    private void sendGmaxRequests(StageMethod
rq) throws IOException, JSONException { + + List gMaxReqs = striping.createGmaxRequests(rq.getRq().getDetails()); + + final StageMethod req = rq; + + SpeedyRequest[] reqs = new SpeedyRequest[gMaxReqs.size()]; + int i = 0; + for (RPCMessage msg : gMaxReqs) { + SpeedyRequest sr = msg.req; + sr.listener = new SpeedyResponseListener() { + + public void receiveRequest(SpeedyRequest theRequest) { + + // count received responses + OSDRequest osdReq = (OSDRequest) theRequest.getOriginalRequest(); + long count = (Long) osdReq.getAttachment(); + count++; + osdReq.setAttachment(count); + + // check if all responses have been received; + // if so, enqueue an operation for the next step + if (count == osdReq.getHttpRequests().length) + enqueueOperation(osdReq, STAGEOP_READ_OBJECT_GMAX_FETCHED, req + .getCallback()); + } + + }; + reqs[i++] = sr; + } + + rq.getRq().setHttpRequests(reqs); + rq.getRq().setAttachment(0L); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "sending gmax requests to remote OSDs ..."); + + for (RPCMessage msg : gMaxReqs) { + master.sendSpeedyRequest(rq.getRq(), msg.req, msg.addr); + master.getStatistics().numGmaxRPCs.incrementAndGet(); + } + } + + private void sendTruncateRequests(StageMethod rq, long newLastObject) throws IOException, + JSONException { + + List truncateReqs = striping.createTruncateRequests(rq.getRq().getDetails(), + newLastObject); + + final StorageThread thisStage = this; + final StageMethod req = rq; + + SpeedyRequest[] reqs = new SpeedyRequest[truncateReqs.size()]; + int i = 0; + for (RPCMessage msg : truncateReqs) { + SpeedyRequest sr = msg.req; + sr.listener = new SpeedyResponseListener() { + + public void receiveRequest(SpeedyRequest theRequest) { + + // count received responses + OSDRequest osdReq = (OSDRequest) theRequest.getOriginalRequest(); + long count = (Long) osdReq.getAttachment(); + count++; + osdReq.setAttachment(count); + + // check if all responses have been received; + // if so, enqueue an operation for the next 
step TODO + if (count == osdReq.getHttpRequests().length) + thisStage.enqueueOperation(osdReq, STAGEOP_TRUNCATE_ACKS_RECEIVED, req + .getCallback()); + } + + }; + reqs[i++] = sr; + } + + rq.getRq().setHttpRequests(reqs); + rq.getRq().setAttachment(0L); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "sending gmax requests to remote OSDs ..."); + + for (RPCMessage msg : truncateReqs) { + master.sendSpeedyRequest(rq.getRq(), msg.req, msg.addr); + master.getStatistics().numTruncateRPCs.incrementAndGet(); + } + } + + private void setReadResponse(StageMethod rq, ReusableBuffer data) { + + if (rq.getRq().getDetails().isCheckOnly()) { + rq.getRq().setData(ReusableBuffer.wrap(String.valueOf(data.capacity()).getBytes()), + HTTPUtils.DATA_TYPE.JSON); + BufferPool.free(data); + } else { + rq.getRq().setData(data, HTTPUtils.DATA_TYPE.BINARY); + if (StatisticsStage.collect_statistics) { + master.getStatistics().bytesTX.addAndGet(data.capacity()); + master.getStatistics().numReads.incrementAndGet(); + } + } + } + + protected void methodExecutionSuccess(StageMethod m, StageResponseCode code) { + if (StatisticsStage.measure_request_times) { + if (m.getRq() != null) + master.getStage(Stages.STORAGE).calcRequestDuration(m.getRq()); + } + super.methodExecutionSuccess(m, code); + } + + protected void methodExecutionFailed(StageMethod m, ErrorRecord err) { + if (StatisticsStage.measure_request_times) { + if (m.getRq() != null) + master.getStage(Stages.STORAGE).calcRequestDuration(m.getRq()); + } + super.methodExecutionFailed(m, err); + } + +} diff --git a/servers/src/org/xtreemfs/osd/storage/CowPolicy.java b/servers/src/org/xtreemfs/osd/storage/CowPolicy.java new file mode 100644 index 0000000000000000000000000000000000000000..811b8d96650c15064a4317d2ec17771922b20e08 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/storage/CowPolicy.java @@ -0,0 +1,113 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. 
+ */ + +package org.xtreemfs.osd.storage; + +/** + * This class implements differen copy-on-write strategies. + * @author bjko + */ +public class CowPolicy { + + public static final CowPolicy PolicyNoCow = new CowPolicy(cowMode.NO_COW); + + public enum cowMode { + /** + * Do not copy on write, overwrite instead. + */ + NO_COW, + /** + * Create a copy only for the first write after open, + * then overwrite for each subsequent request. + */ + COW_ONCE, + /** + * Copy-on-write for all write requests. + */ + ALWAYS_COW + }; + + /** + * CoW mode to use + */ + private final cowMode mode; + + /** + * The initial number of objects. Required for + * COW_ONCE mode. + */ + private final long initialObjectCount; + + /** + * per-object flag which indicates if the object has already been copied + * (for COW_ONCE mode). Each bit is used + */ + private final byte[] cowFlags; + + /** + * Create a new cowPolicy + * @param mode can be NO_COW or ALWAYS_COW but not COW_ONCE + */ + public CowPolicy(cowMode mode) { + if (mode == cowMode.COW_ONCE) { + throw new IllegalArgumentException("Mode COW_ONCE requires initial object count!"); + } + this.mode = mode; + this.initialObjectCount = 0; + this.cowFlags = null; + } + + /** + * Creates a new cowPolicy with COW_ONCE + * @param mode mut be COW_ONCE + * @param initialObjectCount the number of objects when openening the file (added objects do not require COW) + */ + public CowPolicy(cowMode mode, long initialObjectCount) { + this.mode = mode; + this.initialObjectCount = initialObjectCount; + final int fieldLen = (int) Math.ceil(initialObjectCount/Byte.SIZE); + cowFlags = new byte[fieldLen]; + } + + /** + * Checks if an object must be copied before writing + * @param objectNumber the object to be modified + * @return true, if a new version must be created + */ + private boolean requiresCow(int objectNumber) { + assert(mode == cowMode.COW_ONCE); + //new objects do not need copy-on-write ;-) + if (objectNumber >= this.initialObjectCount) + return 
false; + + final int field = objectNumber / Byte.SIZE; + final int bit = objectNumber % Byte.SIZE; + return (cowFlags[field] & (0x0001 << bit)) == 0; + } + + /** + * Checks if copy-on-write is necessary for an object + * @param objectNumber the object to be modified + * @return true, if a new version must be created + */ + public boolean isCOW(int objectNumber) { + return ((mode != cowMode.NO_COW) && + ((mode == cowMode.ALWAYS_COW) || requiresCow(objectNumber)) ); + } + + /** + * toggels the written flag for an object if in COW_ONMCE mode + * @param objectNumber the object which was modified + */ + public void objectChanged(int objectNumber) { + //ignore new objects + if ((mode == cowMode.COW_ONCE) && (objectNumber < this.initialObjectCount)) { + final int field = objectNumber / Byte.SIZE; + final int bit = objectNumber % Byte.SIZE; + cowFlags[field] = (byte)(cowFlags[field] | (0x01 << bit)); + } + } + +} diff --git a/servers/src/org/xtreemfs/osd/storage/FileInfo.java b/servers/src/org/xtreemfs/osd/storage/FileInfo.java new file mode 100644 index 0000000000000000000000000000000000000000..6ba2fa26b58891a4c2e39fe4349a1e69a563bc39 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/storage/FileInfo.java @@ -0,0 +1,114 @@ +/* Copyright (c) 2008 Consiglio Nazionale delle Ricerche and + Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Eugenio Cesario (CNR), Björn Kolbeck (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.osd.storage; + +import java.util.HashMap; +import java.util.Map; + +/** + * + * @author bjko + */ +public class FileInfo { + + private Map objVersions; + + private Map objChecksums; + + private long filesize; + + private long lastObjectNumber; + + private boolean incVersionOnWrite; + + private long truncateEpoch; + + /** Creates a new instance of FileInfo */ + public FileInfo() { + objVersions = new HashMap(); + objChecksums = new HashMap(); + } + + public Map getObjVersions() { + return objVersions; + } + + public Map getObjChecksums() { + return objChecksums; + } + + public long getFilesize() { + return filesize; + } + + public void setFilesize(long filesize) { + this.filesize = filesize; + } + + public long getLastObjectNumber() { + return lastObjectNumber; + } + + public void setLastObjectNumber(long lastObjectNumber) { + this.lastObjectNumber = lastObjectNumber; + } + + public int getObjectVersion(long objId) { + Integer v = objVersions.get(objId); + return (v == null) ? 
0 : v; + } + + public String getObjectChecksum(long objId) { + String c = objChecksums.get(objId); + return c; + } + + public boolean isIncVersionOnWrite() { + return incVersionOnWrite; + } + + public void setIncVersionOnWrite(boolean incVersionOnWrite) { + this.incVersionOnWrite = incVersionOnWrite; + } + + public void deleteObject(long objId) { + objVersions.remove(objId); + objChecksums.remove(objId); + } + + public String toString() { + return "fileSize=" + filesize + ", lastObjNo=" + lastObjectNumber + ", incVersionOnWrite=" + + incVersionOnWrite; + } + + public long getTruncateEpoch() { + return truncateEpoch; + } + + public void setTruncateEpoch(long truncateEpoch) { + this.truncateEpoch = truncateEpoch; + } +} diff --git a/servers/src/org/xtreemfs/osd/storage/HashStorageLayout.java b/servers/src/org/xtreemfs/osd/storage/HashStorageLayout.java new file mode 100644 index 0000000000000000000000000000000000000000..9506c4c3aaacf523ac6cd716b613f9c5a3d17943 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/storage/HashStorageLayout.java @@ -0,0 +1,660 @@ +/* Copyright (c) 2008 Consiglio Nazionale delle Ricerche and + Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Eugenio Cesario (CNR), Björn Kolbeck (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.osd.storage; + +import java.io.File; +import java.io.FileFilter; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.security.NoSuchAlgorithmException; + +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.checksums.ChecksumAlgorithm; +import org.xtreemfs.common.checksums.ChecksumFactory; +import org.xtreemfs.common.checksums.StringChecksumAlgorithm; +import org.xtreemfs.common.checksums.algorithms.JavaHash; +import org.xtreemfs.common.checksums.algorithms.SDBM; +import org.xtreemfs.common.clients.osd.ConcurrentFileMap; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.StripeInfo; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.osd.OSDConfig; + +/** + * + * @author clorenz + */ +public class HashStorageLayout extends StorageLayout { + + /** 32bit algorithm */ + public static final String JAVA_HASH = "Java-Hash"; + + /** 64bit algorithm */ + public static final String SDBM_HASH = "SDBM"; + + public static final int SUBDIRS_16 = 15; + + public static final int SUBDIRS_256 = 255; + + public static final int SUBDIRS_4096 = 4095; + + public static final int SUBDIRS_65535 = 65534; + + public static final int SUBDIRS_1048576 = 1048575; + + public static final int SUBDIRS_16777216 = 16777215; + + public static final String DEFAULT_HASH = JAVA_HASH; + + private static final int DEFAULT_SUBDIRS = SUBDIRS_256; + + private static final int DEFAULT_MAX_DIR_DEPTH = 4; + + private static final char VERSION_SEPARATOR = '.'; + + private static final char CHECKSUM_SEPARATOR = '-'; + + private int prefixLength; + + 
private StringChecksumAlgorithm hashAlgo; + + private int hashCutLength; + + private ChecksumAlgorithm checksumAlgo; + + private long _stat_fileInfoLoads; + + // object list of file IDs, ordered by volume ID + private ConcurrentFileMap fileMap; + + /** Creates a new instance of HashStorageLayout */ + public HashStorageLayout(OSDConfig config, MetadataCache cache) throws IOException { + this(config, cache, DEFAULT_HASH, DEFAULT_SUBDIRS, DEFAULT_MAX_DIR_DEPTH); + } + + /** + * Creates a new instance of HashStorageLayout. If some value is incorrect, + * the default value will be used. + * + * @param config + * @param hashAlgo + * @param maxSubdirsPerDir + * @param maxDirDepth + * @throws IOException + */ + public HashStorageLayout(OSDConfig config, MetadataCache cache, String hashAlgo, + int maxSubdirsPerDir, int maxDirDepth) throws IOException { + + super(config, cache); + + if (hashAlgo == JAVA_HASH) { + this.hashAlgo = new JavaHash(); + }else if (hashAlgo == SDBM_HASH) { + this.hashAlgo = new SDBM(); + } + + if (config.isUseChecksums()) { + + // get the algorithm from the factory + try { + checksumAlgo = ChecksumFactory.getInstance().getAlgorithm(config.getChecksumProvider()); + } catch (NoSuchAlgorithmException e) { + Logging.logMessage(Logging.LEVEL_ERROR, this, + "could not instantiate checksum algorithm '" + config.getChecksumProvider() + + "'"); + Logging.logMessage(Logging.LEVEL_ERROR, this, "OSD checksums will be switched off"); + } + } + + if (maxSubdirsPerDir != 0) { + this.prefixLength = Integer.toHexString(maxSubdirsPerDir).length(); + } else { + this.prefixLength = Integer.toHexString(DEFAULT_SUBDIRS).length(); + } + + if (maxDirDepth != 0) { + this.hashCutLength = maxDirDepth * this.prefixLength; + } else { + this.hashCutLength = DEFAULT_MAX_DIR_DEPTH * this.prefixLength; + } + + _stat_fileInfoLoads = 0; + } + + public ReusableBuffer readObject(String fileId, long objNo, int version, String checksum, + StripingPolicy sp, long osdNumber) throws 
IOException { + ReusableBuffer bbuf = null; + + String fileName = generateAbsolutObjectPath(fileId, objNo, version, checksum); + + File file = new File(fileName); + + if (file.exists()) { + + RandomAccessFile f = new RandomAccessFile(fileName, "r"); + + if (f.length() > 0) { + // read object data + bbuf = BufferPool.allocate((int) f.length()); + f.getChannel().read(bbuf.getBuffer()); + + } else { + // zero padding... + bbuf = BufferPool.allocate((int) sp.getStripeSize(objNo)); + for (int i = 0; i < sp.getStripeSize(objNo); i++) { + bbuf.put((byte) 0); + } + } + + f.close(); + bbuf.position(0); + + } else { + // handles the POSIX behavior of read beyond EOF + bbuf = BufferPool.allocate(0); + bbuf.position(0); + } + + return bbuf; + } + + public boolean checkObject(ReusableBuffer obj, String checksum) { + + // calculate and compare the checksum if checksumming is enabled + if (checksumAlgo != null && checksum != null) { + + // calculate the checksum + checksumAlgo.update(obj.getBuffer()); + String calcedChecksum = checksumAlgo.getValue(); + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "calc'ed checksum: " + calcedChecksum + + ", stored checksum: " + checksum); + + // test the checksum + return calcedChecksum.equals(checksum); + } + + return true; + } + + public void writeObject(String fileId, long objNo, ReusableBuffer data, int version, + int offset, String currentChecksum, StripingPolicy sp, long osdNumber) throws IOException { + + // ignore empty writes + if (data.capacity() > 0) { + + String relPath = generateRelativeFilePath(fileId); + new File(this.storageDir + relPath).mkdirs(); + + try { + // write file + String filename = generateAbsoluteObjectPath(relPath, objNo, version, + currentChecksum); + File file = new File(filename); + RandomAccessFile f = new RandomAccessFile(file, "rw"); + + data.position(0); + f.seek(offset); + f.getChannel().write(data.getBuffer()); + f.close(); + + if (version > 0) { + // delete old file + String 
fileNameOld = generateAbsoluteObjectPath(relPath, objNo, version - 1, + currentChecksum); + File oldFile = new File(fileNameOld); + oldFile.delete(); + } + + } catch (FileNotFoundException ex) { + throw new IOException("unable to create file directory or object: " + + ex.getMessage()); + } + } + } + + public String createChecksum(String fileId, long objNo, ReusableBuffer data, int version, + String currentChecksum) throws IOException { + + String relPath = generateRelativeFilePath(fileId); + new File(this.storageDir + relPath).mkdirs(); + + try { + + // if OSD checksums are enabled, calculate the checksum + if (checksumAlgo != null) { + + String filename = generateAbsoluteObjectPath(relPath, objNo, version, + currentChecksum); + File file = new File(filename); + + // if not data is provided, fetch the object from disk + if (data == null) { + + RandomAccessFile f = new RandomAccessFile(file, "rw"); + data = BufferPool.allocate((int) f.length()); + f.getChannel().read(data.getBuffer()); + f.close(); + + checksumAlgo.update(data.getBuffer()); + BufferPool.free(data); + } + + // otherwise, calculate the checksum directly from the given + // buffer + else + checksumAlgo.update(data.getBuffer()); + + // calculate the checksum and wrap it into a buffer, in + // order to write it to the object file + String checksum = checksumAlgo.getValue(); + + // encode the checksum in the file name by renaming the file + // accordingly + file.renameTo(new File( + generateAbsoluteObjectPath(relPath, objNo, version, checksum))); + + return checksum; + } + + } catch (FileNotFoundException ex) { + throw new IOException("unable to create file directory or object: " + ex.getMessage()); + } + + return null; + } + + public String createPaddingObject(String fileId, long objNo, StripingPolicy sp, int version, + long size) throws IOException { + + String relPath = generateRelativeFilePath(fileId); + new File(this.storageDir + relPath).mkdirs(); + + // calculate the checksum for the padding object 
if necessary + String checksum = null; + if (checksumAlgo != null) { + byte[] content = new byte[(int) size]; + checksumAlgo.update(ByteBuffer.wrap(content)); + checksum = checksumAlgo.getValue(); + } + + // write file + String filename = generateAbsoluteObjectPath(relPath, objNo, version, checksum); + if (size == sp.getStripeSize(objNo)) { + File f = new File(filename); + f.createNewFile(); + } else { + RandomAccessFile raf = new RandomAccessFile(filename, "rw"); + raf.setLength(size); + raf.close(); + } + + return checksum; + + } + + public void deleteAllObjects(String fileId) throws IOException { + File fileDir = new File(generateAbsoluteFilePath(fileId)); + File[] objs = fileDir.listFiles(); + if (objs == null) + return; + for (File obj : objs) { + obj.delete(); + } + } + + public void deleteFile(String fileId) throws IOException { + File fileDir = new File(generateAbsoluteFilePath(fileId)); + File[] objs = fileDir.listFiles(); + if (objs == null) + return; + for (File obj : objs) { + obj.delete(); + } + + // delete all empty dirs along the path + del(fileDir); + } + + private void del(File parent) { + if (parent.list().length > 1 || (parent.getAbsolutePath() + "/").equals(this.storageDir)) { + return; + } else { + parent.delete(); + del(parent.getParentFile()); + } + } + + public void deleteObject(String fileId, final long objNo) throws IOException { + File fileDir = new File(generateAbsoluteFilePath(fileId)); + File[] objs = fileDir.listFiles(new FileFilter() { + + public boolean accept(File pathname) { + return pathname.getName().startsWith("" + objNo); + } + }); + for (File obj : objs) { + obj.delete(); + } + } + + public void deleteObject(String fileId, final long objNo, final int version) throws IOException { + File fileDir = new File(generateAbsoluteFilePath(fileId)); + File[] objs = fileDir.listFiles(new FileFilter() { + + public boolean accept(File pathname) { + return pathname.getName().startsWith("" + objNo) + && pathname.getName().endsWith("" + 
version); + } + }); + for (File obj : objs) { + obj.delete(); + } + } + + public boolean fileExists(String fileId) { + File dir = new File(generateAbsoluteFilePath(fileId)); + return dir.exists(); + } + + protected FileInfo loadFileInfo(String fileId, StripingPolicy sp) throws IOException { + + _stat_fileInfoLoads = 0; + + FileInfo info = new FileInfo(); + + File fileDir = new File(generateAbsoluteFilePath(fileId)); + if (fileDir.exists()) { + + String[] objs = fileDir.list(); + String lastObject = null; + long lastObjNum = -1; + // long lastObjNumVer = -1; + + for (String obj : objs) { + if (obj.startsWith(".")) + continue; // ignore special files (metadata, .tepoch) + int cpos = obj.indexOf(VERSION_SEPARATOR); + int cpos2 = obj.indexOf(CHECKSUM_SEPARATOR); + String tmp = obj.substring(0, cpos); + long objNum = Long.valueOf(tmp); + tmp = cpos2 == -1 ? obj.substring(cpos + 1) : obj.substring(cpos + 1, cpos2); + int objVer = Integer.valueOf(tmp); + if (objNum > lastObjNum) { + lastObject = obj; + lastObjNum = objNum; + } + + String checksum = null; + if (cpos2 != -1) + checksum = obj.substring(cpos2 + 1); + + Integer oldver = info.getObjVersions().get(objNum); + if ((oldver == null) || (oldver < objVer)) { + info.getObjVersions().put(objNum, objVer); + info.getObjChecksums().put(objNum, checksum); + } + } + if (lastObjNum > -1) { + File lastObjFile = new File(fileDir.getAbsolutePath() + "/" + lastObject); + long lastObjSize = lastObjFile.length(); + // check for empty padding file + if (lastObjSize == 0) { + lastObjSize = sp.getStripeSize(lastObjSize); + } + long fsize = lastObjSize; + if (lastObjNum > 0) { + fsize += sp.getLastByte(lastObjNum - 1) + 1; + } + assert (fsize >= 0); + info.setFilesize(fsize); + info.setLastObjectNumber(lastObjNum); + } else { + // empty file! 
+ info.setFilesize(0l); + info.setLastObjectNumber(-1); + } + + // read truncate epoch from file + File tepoch = new File(fileDir, TEPOCH_FILENAME); + if (tepoch.exists()) { + RandomAccessFile rf = new RandomAccessFile(tepoch, "r"); + info.setTruncateEpoch(rf.readLong()); + rf.close(); + } + + } else { + info.setFilesize(0); + info.setLastObjectNumber(-1); + } + + return info; + } + + public void setTruncateEpoch(String fileId, long newTruncateEpoch) throws IOException { + File parent = new File(generateAbsoluteFilePath(fileId)); + if (!parent.exists()) + parent.mkdirs(); + File tepoch = new File(parent, TEPOCH_FILENAME); + RandomAccessFile rf = new RandomAccessFile(tepoch, "rw"); + rf.writeLong(newTruncateEpoch); + rf.close(); + } + + private String generateAbsoluteFilePath(String fileId) { + return this.storageDir + generateRelativeFilePath(fileId); + } + + private String generateAbsolutObjectPath(String fileId, long objNo, int version, String checksum) { + StringBuilder path = new StringBuilder(generateAbsoluteFilePath(fileId)); + path.append(generateFilename(objNo, version, checksum)); + return path.toString(); + } + + private String generateAbsoluteObjectPath(String relativeFilePath, long objNo, int version, + String checksum) { + StringBuilder path = new StringBuilder(this.storageDir); + path.append(relativeFilePath); + path.append(generateFilename(objNo, version, checksum)); + return path.toString(); + } + + private String generateRelativeFilePath(String fileId) { + StringBuilder path = generateHashPath(fileId); + path.append(fileId); + path.append("/"); + return path.toString(); + } + + private StringBuilder generateFilename(long objNo, int version, String checksum) { + StringBuilder filename = new StringBuilder(); + filename.append(objNo); + filename.append(VERSION_SEPARATOR); + filename.append(version); + if (checksum != null) { + filename.append(CHECKSUM_SEPARATOR); + filename.append(checksum); + } + return filename; + } + + /** + * generates the path 
for the file with an "/" at the end + * + * @param fileId + * @return + */ + private StringBuilder generateHashPath(String fileId) { + StringBuilder hashPath = new StringBuilder(128); + String hash = hash(fileId); + int i = 0, j = prefixLength; + + while (j < hash.length()) { + hashPath.append(hash.subSequence(i, j)); + hashPath.append("/"); + + i += prefixLength; + j += prefixLength; + } + if (j < hash.length() + prefixLength) { + hashPath.append(hash.subSequence(i, hash.length())); + hashPath.append("/"); + } + return hashPath; + } + + /** + * computes the hash for the File + * + * @param str + * @return + */ + private String hash(String str) { + this.hashAlgo.digest(str); + String res = this.hashAlgo.getValue(); + + if (res.length() > this.hashCutLength) { + res = res.substring(0, this.hashCutLength); + } + return res; + } + + public long getFileInfoLoadCount() { + return _stat_fileInfoLoads; + } + + /** + * traverse the file tree and fill the fileMap
+ * - directory is the root node where the algorithm starts
+ * - saves the collected file metadata to the global fileMap
+ * - ignores files that start with '.'
+ * + * @param directory + * @throws IOException thrown by fileMap.insert() + */ + private void traverseFileTree(String directory) throws IOException{ + int PREVIEW_LENGTH = 15; + + FileReader fReader; + File f = new File(directory); + File[] sub = f.listFiles(); //there will be no error, if the directory is empty + + File newestFirst = null; + File newestLast = null; + Long objectSize = 0L; + + // go through all subs + for(int i=0;igetVersion(newestFirst)){ + newestFirst = newestLast = sub[i]; + objectSize = (objectSize>=sub[i].length()) ? objectSize : sub[i].length(); + }else if (getVersion(sub[i])==getVersion(newestFirst)){ + if (getObjectNo(sub[i])getObjectNo(newestLast)){ + newestLast = sub[i]; + } + objectSize = (objectSize>=sub[i].length()) ? objectSize : sub[i].length(); + } + }catch(NumberFormatException ne){ + Logging.logMessage(Logging.LEVEL_WARN, this, "CleanUp: an illegal file was discovered and ignored."); + } + // use the next directory as root + }else if(sub[i].isDirectory()) { + traverseFileTree(sub[i].getAbsolutePath()); + } + else assert false; //should never be reached + } + + if (newestFirst!=null){ + // get a preview from the file + fReader = new FileReader(newestFirst); + char[] preview = new char[PREVIEW_LENGTH]; + fReader.read(preview); + fReader.close(); + + // get the metaInfo from the root-directory + int stripCount = getObjectNo(newestLast); + long fileSize = (stripCount==1) ? newestFirst.length() : (objectSize*stripCount)+newestLast.length(); + + // insert the data into the fileMap + fileMap.insert(f.getName(),fileSize,String.valueOf(preview),objectSize); + } + } + + /** + * + * @param f + * @return the VersionNo of the given File. 
+ * @throws NumberFormatException + */ + private int getVersion(File f) throws NumberFormatException{ + String name = f.getName(); + if (name.indexOf(CHECKSUM_SEPARATOR)!=-1) + return Integer.parseInt(name.substring( + name.indexOf(VERSION_SEPARATOR)+1, + name.indexOf(CHECKSUM_SEPARATOR))); + else return Integer.parseInt(name.substring( + name.indexOf(VERSION_SEPARATOR)+1, + name.length())); + } + + /** + * + * @param f + * @return the ObjectNo of the given File. + * @throws NumberFormatException + */ + private int getObjectNo(File f) throws NumberFormatException{ + return Integer.parseInt(f.getName().substring(0, f.getName().indexOf(VERSION_SEPARATOR))); + } + + /** + * + * @return list of all available files on the storage device ordered by volume IDs + * @throws IOException + * + * @throws IOException - thrown by traverseFileTree() + */ + public ConcurrentFileMap getAllFiles() throws IOException{ + this.fileMap = new ConcurrentFileMap(); + traverseFileTree(this.storageDir); + return this.fileMap; + } +} diff --git a/servers/src/org/xtreemfs/osd/storage/Metadata.java b/servers/src/org/xtreemfs/osd/storage/Metadata.java new file mode 100644 index 0000000000000000000000000000000000000000..d4761ae16c91cb252ce0a430f2a130764799c324 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/storage/Metadata.java @@ -0,0 +1,114 @@ +/* Copyright (c) 2008 Consiglio Nazionale delle Ricerche and + Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Eugenio Cesario (CNR), Björn Kolbeck (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.osd.storage; + +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; + +/** This class stores the metadata of every file stored in the OSDs + * + * @author Jesus Malo (jmalo) + */ +public class Metadata implements java.io.Serializable { + + private long knownSize = 0; + private String file; + + /** Creates a new instance of Metadata + * @param f File name where the object is stored + */ + public Metadata(String f) { + file = f; + } + + /** It stores the object in a file + */ + public void store() throws IOException { + ObjectOutputStream warehouse = new ObjectOutputStream(new FileOutputStream(file)); + + warehouse.writeObject(this); + + warehouse.close(); + } + + /** It retrieves the object stored in a file + * @return The object stored in the file + */ + public Metadata retrieve() throws IOException { + ObjectInputStream warehouse = new ObjectInputStream(new FileInputStream(file)); + + Object obj; + + try { + obj = warehouse.readObject(); + } catch (ClassNotFoundException ex) { + throw new IOException(ex.getMessage()); + } + + warehouse.close(); + + return (Metadata) obj; + } + + /** It gets the latest known size of a file + * @return The latest known size of a file + */ + public long getKnownSize() throws IOException { + try { + Metadata metadata = retrieve(); + return metadata.knownSize; + } + catch (FileNotFoundException e) { + return 0; + } + } + + /** It 
sets the known size of a file + * @param file The file whose known size will be set + * @param newSize The new known size of the file + */ + public void putKnownSize(long newSize) throws IOException { + + assert newSize >= 0 : "newSize = " + newSize; + + Metadata metadata; + + try { + metadata = retrieve(); + } + catch(FileNotFoundException e) { + metadata = new Metadata(file); + } + + metadata.knownSize = newSize; + metadata.store(); + } + +} diff --git a/servers/src/org/xtreemfs/osd/storage/MetadataCache.java b/servers/src/org/xtreemfs/osd/storage/MetadataCache.java new file mode 100644 index 0000000000000000000000000000000000000000..9c6ca06adce4e1070aa16d1266d41ba7d3f31d94 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/storage/MetadataCache.java @@ -0,0 +1,56 @@ +/* Copyright (c) 2008 Consiglio Nazionale delle Ricerche and + Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Eugenio Cesario (CNR), Björn Kolbeck (ZIB), Christian Lorenz (ZIB), + * Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.storage; + +import java.util.Map; +import java.util.concurrent.ConcurrentSkipListMap; + + +public class MetadataCache { + + private Map metadataMap; + + /** Creates a new instance of StorageCache */ + public MetadataCache() { + metadataMap = new ConcurrentSkipListMap(); + } + + public FileInfo getFileInfo(String fileId) { + assert (fileId != null); + return metadataMap.get(fileId); + } + + public void setFileInfo(String fileId, FileInfo info) { + assert (info.getFilesize() != 0 || info.getLastObjectNumber() <= 0); + metadataMap.put(fileId, info); + } + + public void removeFileInfo(String fileId) { + metadataMap.remove(fileId); + } + +} diff --git a/servers/src/org/xtreemfs/osd/storage/SimpleStorageLayout.java b/servers/src/org/xtreemfs/osd/storage/SimpleStorageLayout.java new file mode 100644 index 0000000000000000000000000000000000000000..63c35a7492ae2c5c8ecb6bb8a01c15d355af8684 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/storage/SimpleStorageLayout.java @@ -0,0 +1,285 @@ +/* Copyright (c) 2008 Consiglio Nazionale delle Ricerche and + Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Eugenio Cesario (CNR), Björn Kolbeck (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.osd.storage; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.RandomAccessFile; + +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.osd.ConcurrentFileMap; +import org.xtreemfs.common.striping.StripeInfo; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.osd.OSDConfig; + +/** + * A very simple storage layout implementation. It stores one object per + * physical file. Objects are stored in files following the FILEID_OBJECTNO + * pattern + * + * @author bjko + */ + +@Deprecated +public class SimpleStorageLayout extends StorageLayout { + + private long _stat_fileInfoLoads; + + /** Creates a new instance of SimpleStorageLayout */ + public SimpleStorageLayout(OSDConfig config, MetadataCache cache) throws IOException { + super(config, cache); + _stat_fileInfoLoads = 0; + } + + public ReusableBuffer readObject(String fileId, long objNo, int version, String checksum, + StripingPolicy sp, long osdNumber) throws IOException { + ReusableBuffer bbuf = null; + + String fileName = storageDir + generateFilename(fileId, objNo, version); + + File file = new File(fileName); + + if (file.exists()) { + + RandomAccessFile f = new RandomAccessFile(fileName, "r"); + + if (f.length() > 0) { + // read object data + bbuf = BufferPool.allocate((int) f.length()); + + f.getChannel().read(bbuf.getBuffer()); + } else { + // zero padding... 
+ bbuf = BufferPool.allocate((int) sp.getStripeSize(objNo)); + for (int i = 0; i < sp.getStripeSize(objNo); i++) { + bbuf.put((byte) 0); + } + } + + f.close(); + bbuf.position(0); + } else { + // It handles the POSIX behavior of read beyond EOF + bbuf = BufferPool.allocate(0); + bbuf.position(0); + } + + return bbuf; + } + + public boolean checkObject(ReusableBuffer obj, String checksum) { + return true; // no checksum support + } + + public void writeObject(String fileId, long objNo, ReusableBuffer data, int version, + int offset, String currentChecksum, StripingPolicy sp, long osdNumber) throws IOException { + + // ignore empty writes + if (data.capacity() > 0) { + + assert (offset + data.capacity() <= sp.getStripeSize(objNo)); + + String fileName = storageDir + generateFilename(fileId, objNo, version); + + File parentDir = new File(storageDir + generateParentDir(fileId)); + if (!parentDir.exists()) + parentDir.mkdir(); + + try { + // File file = new File(fileName); + + RandomAccessFile f = new RandomAccessFile(fileName, "rw"); + + data.position(0); + f.seek(offset); + f.getChannel().write(data.getBuffer()); + f.close(); + + if (version > 0) { + // delete old file + String fileNameOld = storageDir + generateFilename(fileId, objNo, version - 1); + File oldFile = new File(fileNameOld); + oldFile.delete(); + } + } catch (FileNotFoundException ex) { + throw new IOException("unable to create file directory or object: " + + ex.getMessage()); + } + } + } + + public String createChecksum(String fileId, long objNo, ReusableBuffer data, int version, + String currentChecksum) throws IOException { + return null; + } + + public void deleteFile(final String fileId) throws IOException { + File fileDir = new File(storageDir + generateParentDir(fileId)); + File[] objs = fileDir.listFiles(); + assert (objs != null); + for (File obj : objs) { + obj.delete(); + } + fileDir.delete(); + new File(generateMetadataName(fileId)).delete(); + } + + public void deleteAllObjects(final String 
fileId) throws IOException { + File fileDir = new File(storageDir + generateParentDir(fileId)); + File[] objs = fileDir.listFiles(); + for (File obj : objs) { + obj.delete(); + } + } + + public void deleteObject(String fileId, long objNo, int version) throws IOException { + File f = new File(storageDir + generateFilename(fileId, objNo, version)); + f.delete(); + } + + private String generateFilename(String fileId, long objNo, int version) { + return generateParentDir(fileId) + "/" + objNo + "." + version; + } + + private String generateParentDir(String fileId) { + return fileId; + } + + private String generateMetadataName(String fileId) { + return storageDir + generateParentDir(fileId) + ".metadata"; + } + + public FileInfo loadFileInfo(String fileId, StripingPolicy sp) throws IOException { + + _stat_fileInfoLoads++; + + FileInfo info = new FileInfo(); + + File fileDir = new File(storageDir + generateParentDir(fileId)); + if (fileDir.exists()) { + + String[] objs = fileDir.list(); + String lastObject = null; + long lastObjNum = -1; + // long lastObjNumVer = -1; + + for (String obj : objs) { + if (obj.startsWith(".")) + continue; // ignore special files (metadata, .tepoch) + int cpos = obj.indexOf('.'); + String tmp = obj.substring(0, cpos); + long objNum = Long.valueOf(tmp); + tmp = obj.substring(cpos + 1); + int objVer = Integer.valueOf(tmp); + if (objNum > lastObjNum) { + lastObject = obj; + lastObjNum = objNum; + } + Integer oldver = info.getObjVersions().get(objNum); + if ((oldver == null) || (oldver < objVer)) { + info.getObjVersions().put(objNum, objVer); + } + } + if (lastObjNum > -1) { + File lastObjFile = new File(storageDir + generateParentDir(fileId) + "/" + + lastObject); + long lastObjSize = lastObjFile.length(); + // check for empty padding file + if (lastObjSize == 0) + lastObjSize = sp.getStripeSize(lastObjSize); + long fsize = lastObjSize; + if (lastObjNum > 0) { + fsize += sp.getLastByte(lastObjNum -1 ) + 1; + } + assert (fsize >= 0); + 
info.setFilesize(fsize); + info.setLastObjectNumber(lastObjNum); + } else { + // empty file! + info.setFilesize(0l); + info.setLastObjectNumber(-1); + } + + // read truncate epoch from file + File tepoch = new File(fileDir, TEPOCH_FILENAME); + if (tepoch.exists()) { + RandomAccessFile rf = new RandomAccessFile(tepoch, "r"); + info.setTruncateEpoch(rf.readLong()); + rf.close(); + } + + } else { + fileDir.mkdir(); + info.setFilesize(0); + info.setLastObjectNumber(-1); + } + + // info.setMetadata(new Metadata(generateMetadataName(fileId))); + + return info; + } + + public boolean fileExists(String fileId) { + File dir = new File(storageDir + generateParentDir(fileId)); + return dir.exists(); + } + + public String createPaddingObject(String fileId, long objId, StripingPolicy sp, int version, + long size) throws IOException { + + if (size == sp.getStripeSize(objId)) { + File f = new File(storageDir + generateFilename(fileId, objId, version)); + f.createNewFile(); + } else { + RandomAccessFile raf = new RandomAccessFile(storageDir + + generateFilename(fileId, objId, version), "rw"); + raf.setLength(size); + raf.close(); + } + + return null; + } + + public void setTruncateEpoch(String fileId, long newTruncateEpoch) throws IOException { + File parent = new File(storageDir + generateParentDir(fileId)); + if (!parent.exists()) + parent.mkdirs(); + File tepoch = new File(parent, TEPOCH_FILENAME); + RandomAccessFile rf = new RandomAccessFile(tepoch, "rw"); + rf.writeLong(newTruncateEpoch); + rf.close(); + } + + public long getFileInfoLoadCount() { + return _stat_fileInfoLoads; + } + + public ConcurrentFileMap getAllFiles() throws IOException{ + throw new IOException("This function is not available for the deprecated SimpleStorageLayout!"); + } +} diff --git a/servers/src/org/xtreemfs/osd/storage/StorageLayout.java b/servers/src/org/xtreemfs/osd/storage/StorageLayout.java new file mode 100644 index 
0000000000000000000000000000000000000000..f751b25b6560af96d7969c7463d17645e0155124 --- /dev/null +++ b/servers/src/org/xtreemfs/osd/storage/StorageLayout.java @@ -0,0 +1,300 @@ +/* Copyright (c) 2008 Consiglio Nazionale delle Ricerche and + Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Eugenio Cesario (CNR), Björn Kolbeck (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.osd.storage; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; + +import org.xtreemfs.common.VersionManagement; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.osd.ConcurrentFileMap; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.osd.OSDConfig; + +/** + * Abstracts object data access from underlying on-disk storage layout. 
+ * + * @author bjko + */ +public abstract class StorageLayout { + + public static final String TEPOCH_FILENAME = ".tepoch"; + + public static final String VERSION_FILENAME = ".version"; + + protected final String storageDir; + + protected final MetadataCache cache; + + protected StorageLayout(OSDConfig config, MetadataCache cache) throws IOException { + + this.cache = cache; + + // initialize the storage directory + String tmp = config.getObjDir(); + if (!tmp.endsWith("/")) + tmp = tmp + "/"; + storageDir = tmp; + File stdir = new File(storageDir); + stdir.mkdirs(); + + // check the data version + File versionMetaFile = new File(storageDir, VERSION_FILENAME); + + if (!versionMetaFile.exists()) { + FileWriter out = new FileWriter(versionMetaFile); + out.write(Long.toString(VersionManagement.getOsdDataVersion())); + out.close(); + } + + else { + BufferedReader in = new BufferedReader(new FileReader(versionMetaFile)); + String version = in.readLine(); + in.close(); + + if (!version.equals(Long.toString(VersionManagement.getOsdDataVersion()))) + throw new IOException("wrong OSD data version: " + version + + ", required version is: " + VersionManagement.getOsdDataVersion()); + } + } + + /** + * Returns cached file metadata, or loads and caches it if it is not cached. + * + * @param sp + * @param fileId + * @return + * @throws IOException + */ + public FileInfo getFileInfo(final StripingPolicy sp, final String fileId) throws IOException { + + // try to retrieve metadata from cache + FileInfo fi = cache.getFileInfo(fileId); + + // if metadata is not cached ... + if (fi == null) { + + // ... load metadata from disk + fi = loadFileInfo(fileId, sp); + + // ... cache metadata to speed up further accesses + cache.setFileInfo(fileId, fi); + } + + return fi; + } + + /** + * Loads all metadata associated with a file on the OSD from the storage + * device. Amongst others, such metadata may comprise object version numbers + * and checksums. 
+ * + * @param fileId + * the file ID + * @param sp + * the striping policy assigned to the file + * @return a FileInfo object comprising all metadata + * associated with the file + * @throws IOException + * if an error occurred while trying to read the metadata + */ + protected abstract FileInfo loadFileInfo(String fileId, StripingPolicy sp) throws IOException; + + /** + * Reads a complete object from the storage device. + * + * @param fileId + * fileId of the object + * @param objNo + * object number + * @param version + * version to be read + * @param checksum + * the checksum currently stored with the object + * @param sp + * the striping policy assigned to the file + * @param osdNumber + * the number of the OSD assigned to the object + * @throws java.io.IOException + * when the object cannot be read + * @return a buffer containing the object, or null if the + * object does not exist + */ + public abstract ReusableBuffer readObject(String fileId, long objNo, int version, + String checksum, StripingPolicy sp, long osdNumber) throws IOException; + + /** + * Determines whether the given data has a correct checksum. + * + * @param obj + * the object data + * @param checksum + * the correct checksum + * @return true, if the given checksum matches the checksum + * of the data, false, otherwise + */ + public abstract boolean checkObject(ReusableBuffer obj, String checksum); + + /** + * Writes a partial object to the storage device. 
+ * + * @param fileId + * the file Id the object belongs to + * @param objNo + * object number + * @param data + * buffer with the data to be written + * @param version + * the version to be written + * @param offset + * the relative offset in the object at which to write the buffer + * @param currentChecksum + * the checksum currently assigned to the object; if OSD + * checksums are disabled, null can be used + * @param sp + * the striping policy assigned to the file + * @param osdNumber + * the number of the OSD responsible for the object + * @throws java.io.IOException + * when the object cannot be written + */ + public abstract void writeObject(String fileId, long objNo, ReusableBuffer data, int version, + int offset, String currentChecksum, StripingPolicy sp, long osdNumber) throws IOException; + + /** + * Calculates and stores the checksum for an object. + * + * @param fileId + * the file Id the object belongs to + * @param objNo + * the object number + * @param data + * a buffer to calculate the checksum from. If null + * is provided, the object will be rad from the storage device + * and checksummed. + * @param version + * the version of the object + * @param currentChecksum + * the checksum currently assigned to the object; if OSD + * checksums are disabled, null can be used + * @return if OSD checksums are enabled, the newly calculated checksum; + * null, otherwise + * @throws java.io.IOException + * if an I/O error occured + */ + public abstract String createChecksum(String fileId, long objNo, ReusableBuffer data, + int version, String currentChecksum) throws IOException; + + /** + * Deletes all objects of a file. + * + * @param fileId + * the ID of the file + * @throws IOException + * if an error occurred while deleting the objects + */ + public abstract void deleteFile(String fileId) throws IOException; + + /** + * Deletes all objects of a file. 
+ * + * @param fileId + * the ID of the file + * @throws IOException + * if an error occurred while deleting the objects + */ + public abstract void deleteAllObjects(String fileId) throws IOException; + + /** + * Deletes a single version of a single object of a file. + * + * @param fileId + * the ID of the file + * @param objNo + * the number of the object to delete + * @param version + * the version number of the object to delete + * @throws IOException + * if an error occurred while deleting the object + */ + public abstract void deleteObject(String fileId, long objNo, int version) throws IOException; + + /** + * Creates and stores a zero-padded object. + * + * @param fileId + * the ID of the file + * @param objNo + * the number of the object to create + * @param sp + * the striping policy assigned to the file + * @param version + * the version of the object to create + * @param size + * the size of the object to create + * @return if OSD checksums are enabled, the newly calculated checksum; + * null, otherwise + * @throws IOException + * if an error occurred when storing the object + */ + public abstract String createPaddingObject(String fileId, long objNo, StripingPolicy sp, + int version, long size) throws IOException; + + /** + * Persistently stores a new truncate epoch for a file. + * + * @param fileId + * the file ID + * @param newTruncateEpoch + * the new truncate epoch + * @throws IOException + * if an error occurred while storing the new epoch number + */ + public abstract void setTruncateEpoch(String fileId, long newTruncateEpoch) throws IOException; + + /** + * Checks whether the file with the given ID exists. 
+ * + * @param fileId + * the ID of the file + * @return true, if the file exists, false, + * otherwise + */ + public abstract boolean fileExists(String fileId); + + public abstract long getFileInfoLoadCount(); + + /** + * + * @return all available files on the OSD ordered by volume IDs + * + * @throws IOException if an error occurred + */ + public abstract ConcurrentFileMap getAllFiles() throws IOException; +} diff --git a/servers/src/org/xtreemfs/osd/storage/Striping.java b/servers/src/org/xtreemfs/osd/storage/Striping.java new file mode 100644 index 0000000000000000000000000000000000000000..7176ba6f2bfd12b635a2caab92e24362a1a2aedf --- /dev/null +++ b/servers/src/org/xtreemfs/osd/storage/Striping.java @@ -0,0 +1,331 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.osd.storage; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.LinkedList; +import java.util.List; + +import org.xtreemfs.common.buffer.ASCIIString; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.common.uuids.UnknownUUIDException; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.foundation.speedy.SpeedyRequest.RequestStatus; +import org.xtreemfs.osd.RPCTokens; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDetails; +import org.xtreemfs.osd.UDPMessageType; + +/** + * A helper class providing methods related to the OSD striping logic. 
+ */ +public final class Striping { + + /** + * wait up to a minute for delete to return + * + */ + public static final int DELETE_TO = 60 * 1000; + + private final ServiceUUID localOSDId; + + private MetadataCache storageCache; + + public Striping(ServiceUUID localId, MetadataCache storageCache) { + this.localOSDId = localId; + this.storageCache = storageCache; + } + + public List createGmaxRequests(RequestDetails req) throws JSONException, UnknownUUIDException { + + assert (req.getCapability() != null); + assert (req.getFileId() != null); + assert (req.getLocationList() != null); + + final List osdMessages = new LinkedList(); + + for (ServiceUUID osd : req.getCurrentReplica().getOSDs()) { + + if (osd.equals(localOSDId)) + continue; + + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, req.getCapability().toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, req.getLocationList().asJSONString() + .asString()); + headers.addHeader(HTTPHeaders.HDR_XFILEID, req.getFileId()); + + SpeedyRequest sr = new SpeedyRequest(HTTPUtils.POST_TOKEN, headers, + RPCTokens.fetchGlobalMaxToken); + InetSocketAddress addr = osd.getAddress(); + + osdMessages.add(new RPCMessage(addr, sr)); + } + + return osdMessages; + } + + public void processGmaxResponses(OSDRequest req) throws IOException { + + final RequestDetails details = req.getDetails(); + final String fileId = details.getFileId(); + final FileInfo fi = storageCache.getFileInfo(fileId); + + for (SpeedyRequest r : req.getHttpRequests()) { + + try { + if (r.status == SpeedyRequest.RequestStatus.FAILED) { + IOException exc = new IOException("request failed, cannot contact OSDs"); + Logging.logMessage(Logging.LEVEL_ERROR, this, "error " + exc); + throw exc; + } else { + + assert(r.status == RequestStatus.FINISHED); + + if (r.statusCode == HTTPUtils.SC_OKAY) { + + byte[] resp = r.getResponseBody(); + + String body = new String(resp, HTTPUtils.ENC_UTF8); + List localMax = (List) 
JSONParser.parseJSON(new JSONString( + body)); + + assert (localMax.size() == 3); + + long epoch = (Long) localMax.get(0); + long lastObjId = (Long) localMax.get(1); + long fileSize = (Long) localMax.get(2); + + // if a larger file size or a newer epoch has been + // received, replace gmax w/ received globalMax + if ((epoch == fi.getTruncateEpoch() && fileSize > fi.getFilesize()) + || epoch > fi.getTruncateEpoch()) { + + fi.setLastObjectNumber(lastObjId); + fi.setFilesize(fileSize); + + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, + "received more up-to-date (fs/epoch)=(" + fileSize + "/" + + epoch + ") for " + fileId + ", replacing former one on " + + localOSDId); + } + } + + else if (r.statusCode == HTTPUtils.SC_NOT_FOUND) { + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "no gmax value for file " + + fileId + " known by " + r.getServer()); + } else { + IOException exc = new IOException(r.statusCode + + " occured when receiving gmax response"); + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + throw exc; + } + } + + } catch (IOException exc) { + throw exc; + + } catch (Exception exc) { + + Logging.logMessage(Logging.LEVEL_ERROR, this, exc); + throw new IOException("error on remote (stripe) OSD:" + exc); + + } finally { + r.freeBuffer(); + } + } + } + + public List createGmaxMessages(ASCIIString fileId, long newFS, long newLastObjNo, + long newEpoch, Location stripes) throws UnknownUUIDException { + + if (Logging.isDebug()) { + final FileInfo fi = storageCache.getFileInfo(fileId.toString()); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "create GMAX message: " + newFS + + ", former fs=" + fi.getFilesize()); + } + + List msgs = new LinkedList(); + ReusableBuffer data = BufferPool.allocate(128); + data.put((byte) UDPMessageType.Striping.ordinal()); + data.putBufferBackedASCIIString(fileId); + data.putLong(newEpoch); + data.putLong(newLastObjNo); + data.putLong(newFS); + + for (ServiceUUID osd : 
stripes.getOSDs()) { + if (osd.equals(localOSDId)) + continue; + if (Logging.isDebug()) + Logging.logMessage(Logging.LEVEL_DEBUG, this, "sending UDP GMAX to " + osd + ": " + + newLastObjNo + " for " + fileId); + msgs.add(new UDPMessage(osd.getAddress(), data)); + } + + if (msgs.size() == 0) + BufferPool.free(data); + + return msgs; + } + + public void processGmaxMessage(OSDRequest rq, MetadataCache cache) { + // parse request + ReusableBuffer data = null; + try { + data = rq.getData(); + final ASCIIString fileId = data.getBufferBackedASCIIString(); + final long epoch = data.getLong(); + final long newLastObjNo = data.getLong(); + final long newFS = data.getLong(); + + // check if FileInfo is in cache + FileInfo fi = cache.getFileInfo(fileId.toString()); + + if (fi == null) { + // file is not open, discard GMAX + return; + } + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "received new GMAX: " + newLastObjNo + + "/" + newFS + "/" + epoch + " for " + fileId); + + if ((epoch == fi.getTruncateEpoch() && fi.getFilesize() < newFS) + || epoch > fi.getTruncateEpoch()) { + + // valid file size update + fi.setFilesize(newFS); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "received GMAX is valid; for " + + fileId + ", current (fs, epoch) = (" + fi.getFilesize() + ", " + + fi.getTruncateEpoch() + ")"); + + } else { + + // outdated file size udpate + + Logging.logMessage(Logging.LEVEL_DEBUG, this, "received GMAX is outdated; for " + + fileId + ", current (fs, epoch) = (" + fi.getFilesize() + ", " + + fi.getTruncateEpoch() + ")"); + } + } finally { + BufferPool.free(data); + } + } + + public List createTruncateRequests(RequestDetails details, long lastObject) + throws IOException, JSONException { + + List osdMessages = new LinkedList(); + + for (ServiceUUID osd : details.getCurrentReplica().getOSDs()) { + + if (osd.equals(localOSDId)) + continue; + + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, 
details.getCapability().toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, details.getLocationList().asJSONString() + .asString()); + headers.addHeader(HTTPHeaders.HDR_XFILEID, details.getFileId()); + + ReusableBuffer data = ReusableBuffer.wrap(JSONParser.toJSON(details.getFileId(), + Long.valueOf(details.getTruncateFileSize())).getBytes(HTTPUtils.ENC_UTF8)); + + SpeedyRequest sr = new SpeedyRequest(HTTPUtils.POST_TOKEN, + RPCTokens.truncateLocalTOKEN, null, null, data, HTTPUtils.DATA_TYPE.JSON, headers); + + InetSocketAddress addr = osd.getAddress(); + + osdMessages.add(new RPCMessage(addr, sr)); + } + + return osdMessages; + } + + public List createDeleteRequests(RequestDetails details) throws IOException, + JSONException { + + List osdMessages = new LinkedList(); + + for (ServiceUUID osd : details.getCurrentReplica().getOSDs()) { + + if (osd.equals(localOSDId)) + continue; + + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, details.getCapability().toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, details.getLocationList().asJSONString() + .asString()); + headers.addHeader(HTTPHeaders.HDR_XFILEID, details.getFileId()); + + SpeedyRequest sr = new SpeedyRequest(HTTPUtils.POST_TOKEN, headers, + RPCTokens.deleteLocalTOKEN); + InetSocketAddress addr = osd.getAddress(); + + osdMessages.add(new RPCMessage(addr, sr)); + } + + return osdMessages; + } + + public static class RPCMessage { + + public InetSocketAddress addr; + + public SpeedyRequest req; + + public RPCMessage(InetSocketAddress addr, SpeedyRequest req) { + this.addr = addr; + this.req = req; + } + + } + + public static class UDPMessage { + + public InetSocketAddress addr; + + public ReusableBuffer buf; + + public UDPMessage(InetSocketAddress addr, ReusableBuffer buf) { + this.addr = addr; + this.buf = buf; + } + + } + +} diff --git a/servers/src/org/xtreemfs/osd/templates/status.html b/servers/src/org/xtreemfs/osd/templates/status.html new file 
mode 100644 index 0000000000000000000000000000000000000000..dcd1d2bd6c7578117af3b1f4797e580a4c56dfae --- /dev/null +++ b/servers/src/org/xtreemfs/osd/templates/status.html @@ -0,0 +1,156 @@ + + + XtreemFS OSD @ <!-- $UUID --> + + + + +

OSD

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Configuration +
TCP & UDP port
Directory Service
Debug Level
Statistics
+ Load +
# HTTP connections (pinky)
HTTP server (pinky) queue length
Parser Stage queue length
Auth Stage queue length
Storage Stage queue length
Deletion Stage queue length
Open files
+ Transfer +
# object written
# object read
bytes sent
bytes received
# GMAX packets received
# GMAX requests sent
# files deleted
+ VM Info / Memory +
Free Disk Space
Memory free/max/total
Buffer Pool stats
+ Time +
global XtreemFS time
resync interval for global time ms
local system time
local time update interval ms
+ UUID Mapping Cache +
+ + \ No newline at end of file diff --git a/servers/src/org/xtreemfs/sandbox/CreateFilePerformanceTest.java b/servers/src/org/xtreemfs/sandbox/CreateFilePerformanceTest.java new file mode 100644 index 0000000000000000000000000000000000000000..f3bdd7fb2145273fe73fa2f7486b59ad1bfc6d67 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/CreateFilePerformanceTest.java @@ -0,0 +1,174 @@ +/* + * CreateFile.java + * JUnit based test + * + * Created on January 17, 2007, 8:39 AM + */ + +package org.xtreemfs.sandbox; + +import java.net.InetSocketAddress; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.dir.DIRConfig; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.foundation.speedy.SpeedyResponseListener; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.mrc.MRCConfig; + +/** + * Simple test to check the performance of the MRC for file + * creates. + * @author bjko + */ +public class CreateFilePerformanceTest { + + private MRCConfig brainConfig; + + private DIRConfig dirServiceConfig; + + public static volatile int numRq = 0; + + public static boolean error = false; + + + public CreateFilePerformanceTest() throws Exception { + } + + + // TODO add test methods here. The name must begin with 'test'. 
For example: + // public void testHello() {} + + public void testCreateFile() throws Exception { + + final InetSocketAddress endpoint = new InetSocketAddress( + "localhost", 32636); + final MultiSpeedy client = new MultiSpeedy(); + client.registerListener(new SpeedyResponseListener() { + int numR = 0; + + public void receiveRequest(SpeedyRequest resp) { + try { + if (resp.status == SpeedyRequest.RequestStatus.FAILED) { + System.out.println("HTTP request failed for unknown reason"); + } else if (resp.status == SpeedyRequest.RequestStatus.FINISHED) { + if (resp.statusCode == 200) { + numRq++; + //System.out.println("RQID = "+resp.responseHeaders.getHeader("X-DEBUG-RQID")); + //System.out.println("SC 200: "+resp.responseBody); + } else { + error = true; + byte bdy[] = null; + + if (resp.responseBody.hasArray()) { + bdy = resp.responseBody.array(); + } else { + bdy = new byte[resp.responseBody.capacity()]; + resp.responseBody.position(0); + resp.responseBody.get(bdy); + } + + String body = new String(bdy, "ascii"); + System.out.println("ERROR: "+body); + } + } else { + System.out.println("strange status: "+resp.status); + } + + + } catch (Exception ex) { + System.out.println("Exception occurred in responseListener: "+ex); + ex.printStackTrace(); + } + } + }, endpoint); + + Thread test = new Thread(client); + test.start(); + Thread.currentThread().yield(); + + Thread secCheck = new Thread(new Runnable() { + public void run() { + int lastNum = 0; + while (true) { + try { + synchronized (this) { + this.wait(999); + } + } catch (InterruptedException ex) { + System.out.println("interrupted..."); + } + int copyNRQ = numRq; + System.out.println(">>>>>>> creates/sec = "+(copyNRQ-lastNum)); + lastNum = copyNRQ; + } + } + }); + secCheck.setPriority(Thread.MAX_PRIORITY); + secCheck.start(); + + ReusableBuffer bdy = ReusableBuffer.wrap(("[\"Blup\"]").getBytes()); + SpeedyRequest sr = new SpeedyRequest("GET","createVolume",null,"nullauth 1 1",bdy,HTTPUtils.DATA_TYPE.JSON); + 
//client.sendRequest(sr,endpoint); + + try { + synchronized (this) { + this.wait(5000); + } + } catch (InterruptedException ex2) { + } + //System.exit(1); + bdy = ReusableBuffer.wrap(("[\"testVolume\"]").getBytes()); + for (int i = 0; i < 20000; i++) { + bdy = ReusableBuffer.wrap(("[\"Blup/t3_"+(i+10)+"\"]").getBytes()); + sr = new SpeedyRequest("GET","createFile",null,"nullauth 1 1",bdy,HTTPUtils.DATA_TYPE.JSON); + + //bdy = ByteBuffer.wrap(("[\"TestVolume/test4_120\",false,false,false]").getBytes()); + //sr = new SpeedyRequest("GET","stat",null,"nullauth 1 1",bdy,HTTPUtils.DATA_TYPE.TEXT); + try { + //sr = new SpeedyRequest("GET","readDir",null,bdy,HTTPUtils.DATA_TYPE.TEXT); + client.sendRequest(sr,endpoint); + } catch (IllegalStateException ex) { + System.out.println("QQQQQ Q FULL"); + try { + synchronized (this) { + this.wait(20); + } + } catch (InterruptedException ex2) { + } + } + if (i%10 == 0) { + try { + synchronized (this) { + this.wait(1); + } + } catch (InterruptedException ex2) { + } + } + + if (error) { + System.out.println("Error occurred, abort!"); + break; + } + //System.out.println("testX"+i); + Thread.currentThread().yield(); + } + + if (error) + System.exit(1); + + + } + + public static void main(String[] args) { + Logging.start(Logging.LEVEL_WARN); + try { + CreateFilePerformanceTest cft = new CreateFilePerformanceTest(); + cft.testCreateFile(); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + +} diff --git a/servers/src/org/xtreemfs/sandbox/DirectIOReader.java b/servers/src/org/xtreemfs/sandbox/DirectIOReader.java new file mode 100644 index 0000000000000000000000000000000000000000..9af21ce8fb3c8c363bcd26de65751d75d974c730 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/DirectIOReader.java @@ -0,0 +1,18 @@ +package org.xtreemfs.sandbox; + +import java.nio.ByteBuffer; + +public class DirectIOReader { + + static { + System.loadLibrary("readdirect"); + } + + public static native ByteBuffer loadFile(String name); + + public static 
void main(String[] args) { + System.out.println("length: " + + DirectIOReader.loadFile(args[0]).capacity()); + } + +} diff --git a/servers/src/org/xtreemfs/sandbox/DummyServer.java b/servers/src/org/xtreemfs/sandbox/DummyServer.java new file mode 100644 index 0000000000000000000000000000000000000000..75ecef2215a593435b7a2e31a97a83cd8b9e536c --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/DummyServer.java @@ -0,0 +1,81 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. + */ + +package org.xtreemfs.sandbox; + +import java.io.IOException; +import java.util.logging.Level; +import java.util.logging.Logger; + +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.pinky.PinkyRequestListener; +import org.xtreemfs.foundation.pinky.PipelinedPinky; + +/** + * + * @author bjko + */ +public class DummyServer implements PinkyRequestListener { + + + public static final int size = 1024 * 1024; + + public static final int delay = 0; + + public ReusableBuffer buff; + + public PipelinedPinky pinky; + + public DummyServer(PipelinedPinky pinky) { + + buff = BufferPool.allocate(size); + + this.pinky = pinky; + } + + public void setPinky(PipelinedPinky pinky) { + this.pinky = pinky; + } + + public void receiveRequest(PinkyRequest theRequest) { + theRequest.setResponse(HTTPUtils.SC_OKAY, buff.createViewBuffer(), HTTPUtils.DATA_TYPE.BINARY); + if (delay > 0) { + synchronized(this) { + try { + Thread.currentThread().sleep(0, delay); + } catch (InterruptedException ex) { + Logger.getLogger(DummyServer.class.getName()).log(Level.SEVERE, null, ex); + } + } + } + pinky.sendResponse(theRequest); + } + + public static void main(String[] args) { + try { + + Logging.start(Logging.LEVEL_ERROR); + + DummyServer me = new 
DummyServer(null); + + PipelinedPinky pinky = new PipelinedPinky(32641, null, me); + + + me.setPinky(pinky); + pinky.start(); + System.out.println("pinky running on 32641 with "+size+"bytes/rq and delay of "+delay+"ns"); + } catch (IOException ex) { + Logger.getLogger(DummyServer.class.getName()).log(Level.SEVERE, null, ex); + } + + } + + + +} diff --git a/servers/src/org/xtreemfs/sandbox/JSONTest.java b/servers/src/org/xtreemfs/sandbox/JSONTest.java new file mode 100644 index 0000000000000000000000000000000000000000..a25759262c9d23f986c3e06a35def4e76946a379 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/JSONTest.java @@ -0,0 +1,80 @@ +/* + * JSONTest.java + * + * Created on December 15, 2006, 1:58 PM + * + * To change this template, choose Tools | Template Manager + * and open the template in the editor. + */ + +package org.xtreemfs.sandbox; + +import java.io.FileNotFoundException; +import java.io.FileReader; +import org.xtreemfs.foundation.json.JSONException; + +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; + +/** + * Simple (manual) test for the JSON parser. 
+ * + * @author bjko + */ +public class JSONTest { + + /** Creates a new instance of JSONTest */ + public JSONTest() { + } + + public static void stupido() { + stupido(); + } + + /** + * @param args + * the command line arguments + */ + public static void main(String[] args) { + JSONString sr = null; + try { + // TODO code application logic here + FileReader fr = new FileReader("json.txt"); + int ch = fr.read(); + StringBuilder in = new StringBuilder(); + while (ch != -1) { + if ((ch != '\r') && (ch != '\n')) + in.append((char) ch); + ch = fr.read(); + } + fr.close(); + + long tStart = System.currentTimeMillis(); + sr = new JSONString(in.toString()); + Object o = JSONParser.parseJSON(sr); + long tEnd = System.currentTimeMillis(); + System.out.println("parsing took " + (tEnd - tStart) + "ms"); + System.out.println(""); + System.out.println(JSONParser.writeJSON(o)); + } catch (FileNotFoundException ex) { + ex.printStackTrace(); + } catch (Exception ex) { + try { + char ch = sr.read(); + System.out.print(ch); + while (sr.hasMore()) { + ch = sr.read(); + System.out.print(ch); + } + } catch(JSONException jex) { + } + + ex.printStackTrace(); + } catch (StackOverflowError e) { + System.out.println("Stack Overflow"); + e.printStackTrace(); + } + + } + +} diff --git a/servers/src/org/xtreemfs/sandbox/Main.java b/servers/src/org/xtreemfs/sandbox/Main.java new file mode 100644 index 0000000000000000000000000000000000000000..855038a74c0286220b48005fb80a2b22a458e11c --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/Main.java @@ -0,0 +1,96 @@ +/* + * Main.java + * + * Created on 8. 
Dezember 2006, 10:21 + * + * @author Bjoern Kolbeck, Zuse Institute Berlin (kolbeck@zib.de) + * + */ + +package org.xtreemfs.sandbox; + +import java.io.IOException; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.pinky.PipelinedPinky; +import org.xtreemfs.foundation.pinky.PinkyRequestListener; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; + +/** + * A simple (manual) and stupid test for the Pinky Server. Receives JSON data, + * parses it, writes it to JSON and returns it. Used for performance tests. + * + * @author bjko + */ +public class Main { + + /** Creates a new instance of Main */ + public Main() { + } + + /** + * @param args + * the command line arguments + */ + public static void main(String[] args) { + Thread test; + try { + // create a new Pinky server + final PipelinedPinky sthr = new PipelinedPinky(12203, null, null); + + // register a request listener that is called by pinky when + // receiving a request + sthr.registerListener(new PinkyRequestListener() { + public void receiveRequest(PinkyRequest theRequest) { + try { + // unpack body, parse it, write back JSON and send that + // back to the client + if (theRequest.requestBody != null) { + byte bdy[] = null; + if (theRequest.requestBody.hasArray()) { + bdy = theRequest.requestBody.array(); + } else { + bdy = new byte[theRequest.requestBody + .capacity()]; + theRequest.requestBody.position(0); + theRequest.requestBody.get(bdy); + } + + String body = new String(bdy, "utf-8"); + Object o = JSONParser + .parseJSON(new JSONString(body)); + String respBdy = JSONParser.writeJSON(o); + theRequest.setResponse(HTTPUtils.SC_OKAY, + ReusableBuffer.wrap(respBdy.getBytes("utf-8")), + HTTPUtils.DATA_TYPE.JSON); + } else { + theRequest.setResponse(HTTPUtils.SC_OKAY); + } + sthr.sendResponse(theRequest); + } catch (Exception ex) { + 
ex.printStackTrace(); + try { + theRequest.setResponse(HTTPUtils.SC_SERVER_ERROR); + } catch (Exception e) { + // ignore that + e.printStackTrace(); + } + theRequest.setClose(true); + sthr.sendResponse(theRequest); + + } + } + }); + // start the Pinky server in a new thread + test = new Thread(sthr); + test.start(); + + } catch (IOException ex) { + ex.printStackTrace(); + } + + } + +} diff --git a/servers/src/org/xtreemfs/sandbox/SpeedyMain.java b/servers/src/org/xtreemfs/sandbox/SpeedyMain.java new file mode 100644 index 0000000000000000000000000000000000000000..153d45098ef2ba580d85f850c075422438d2150d --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/SpeedyMain.java @@ -0,0 +1,118 @@ +/* + * SpeedyMain.java + * + * Created on December 22, 2006, 1:36 PM + * + * To change this template, choose Tools | Template Manager + * and open the template in the editor. + */ + +package org.xtreemfs.sandbox; + +import java.io.IOException; +import java.net.InetSocketAddress; +import org.xtreemfs.common.buffer.ReusableBuffer; + +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.foundation.speedy.SpeedyResponseListener; + +/** + * Simple (manual) test case for Pinky and Speedy. + * + * @author bjko + */ +public class SpeedyMain { + + /** Creates a new instance of SpeedyMain */ + public SpeedyMain() { + } + + /** + * @param args + * the command line arguments + */ + public static void main(String[] args) { + Thread test; + + final int NREQ = 50; + + try { + final InetSocketAddress endpoint = new InetSocketAddress( + "farnsworth.zib.de", 32636); + final MultiSpeedy client = new MultiSpeedy(); + client.registerListener(new SpeedyResponseListener() { + int numR = 0; + + public void receiveRequest(SpeedyRequest resp) { + try { + if (resp.status == SpeedyRequest.RequestStatus.FAILED) { + System.out.println("!!! 
request failed!"); + } else { + Long rTime = System.currentTimeMillis(); + byte bdy[] = null; + + if (resp.responseBody.hasArray()) { + bdy = resp.responseBody.array(); + } else { + bdy = new byte[resp.responseBody.capacity()]; + resp.responseBody.position(0); + resp.responseBody.get(bdy); + } + + String body = new String(bdy, "ascii"); + } + + numR++; + System.out.println("<<<< response " + numR + " took " + + (resp.received - resp.sendStart) + "ms"); + if (numR == NREQ) { + System.exit(1); + } + } catch (Exception ex) { + System.out.println("ex: " + ex); + System.exit(1); + } + } + }, endpoint); + + test = new Thread(client); + test.start(); + Thread.currentThread().yield(); + + String json = "[\"myVolume/newDir/\"]"; + ReusableBuffer rqBdy = ReusableBuffer.wrap(json.getBytes("utf-8")); + int errCnt = 0; + for (int i = 0; i < NREQ; i++) { + SpeedyRequest sr = null; + try { + sr = new SpeedyRequest("GET", "readDir", null, null, rqBdy, + HTTPUtils.DATA_TYPE.JSON); + client.sendRequest(sr, endpoint); + } catch (IllegalStateException e) { + e.printStackTrace(); + break; + } + + System.out.println(">>>> request " + (i + 1)); + + rqBdy.position(0); + + if (i % 5 == 0) { + try { + Thread.currentThread().sleep(1); + } catch (InterruptedException ex) { + } + } + // Thread.currentThread().yield(); + } + + // client.shutdown(); + } catch (IOException ex) { + ex.printStackTrace(); + System.exit(1); + } + } + +} diff --git a/servers/src/org/xtreemfs/sandbox/ThroughputTest.java b/servers/src/org/xtreemfs/sandbox/ThroughputTest.java new file mode 100644 index 0000000000000000000000000000000000000000..b319d86e72fa7161ed1a9c96aa679f97f7d97fe0 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/ThroughputTest.java @@ -0,0 +1,257 @@ +package org.xtreemfs.sandbox; + +import java.io.FileInputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import 
java.util.Properties; +import java.util.StringTokenizer; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.RPCResponseListener; +import org.xtreemfs.common.clients.osd.OSDClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.HTTPHeaders; + +public class ThroughputTest { + + private static int responses = 0; + + private static Object lock = new Object(); + + private static OSDClient[] clients; + + private static Map osdThroughputs; + + /** + * @param args + */ + public static void main(String[] args) throws Exception { + + if (args.length != 3) { + System.out + .println("usage: ThroughputTest "); + System.exit(1); + } + + boolean write = args[0].equalsIgnoreCase("write"); + int numOSDs = Integer.parseInt(args[1]); + + Properties props = new Properties(); + props.load(new FileInputStream(args[2])); + + final int stripeSize = Integer + .parseInt(props.getProperty("stripeSize")); + final int fileSize = Integer.parseInt(props.getProperty("fileSize")); + final boolean debug = props.getProperty("debug").equalsIgnoreCase( + "true"); + final int numClients = Integer + .parseInt(props.getProperty("numClients")); + final int rwAhead = Integer.parseInt(props.getProperty("rwAhead")); + + final int burstSize = Integer.parseInt(props.getProperty("burstSize")); + + final String secret = props.getProperty("capSecret"); + + List osds = new ArrayList(numOSDs); + for (int i = 0;; i++) { + String osd = props.getProperty("osd" + i); + if (osds.size() > 0 && osd == null) + break; + else if (osd == null) + continue; + else { + osds.add(osd); + if (osds.size() == numOSDs) + break; + } + } + + Logging.start(Logging.LEVEL_ERROR); + + InetSocketAddress[] uris = new 
InetSocketAddress[osds.size()]; + for (int i = 0; i < osds.size(); i++) { + StringTokenizer st = new StringTokenizer(osds.get(i), ":"); + uris[i] = new InetSocketAddress(st.nextToken(), Integer.parseInt(st + .nextToken())); + } + + final String fileId = "ABC:1"; + final Capability cap = new Capability(fileId, "w", 0, secret); + + String osdsAsList = ""; + for (int i = 0; i < osds.size(); i++) + osdsAsList += "\"http://" + osds.get(i) + "\"" + + (i < osds.size() - 1 ? ", " : ""); + + Locations loc = new Locations(new JSONString( + "[[[{\"policy\": \"RAID0\", \"stripe-size\":" + stripeSize + + ", \"width\":" + osds.size() + " }, [" + osdsAsList + + "]]], 1, \"sync\"]")); + + long t0 = System.nanoTime(); + + ReusableBuffer buf = write ? BufferPool.allocate(stripeSize * 1024) + : null; + + clients = new OSDClient[numClients]; + for (int i = 0; i < numClients; i++) + clients[i] = new OSDClient(null); + + osdThroughputs = new HashMap(); + + for (long i = 0; i < fileSize / stripeSize; i++) { + + final int osd = (int) (i % uris.length); + final int client = (int) (i % clients.length); + + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, i + ""); + + if (debug) + System.out.println("sending request " + i + " to OSD " + + uris[osd] + " ..."); + try { + + // send write requests + if (write) { + + ReusableBuffer viewBuf = buf.createViewBuffer(); + + viewBuf.put((byte) 66); + + final RPCResponse res = clients[client].put(uris[osd], loc, + cap, fileId, i, viewBuf); + res.setResponseListener(new RPCResponseListener() { + public void responseAvailable(RPCResponse response) { + synchronized (lock) { + + responses++; + + byte[] body = response.getSpeedyRequest() + .getResponseBody(); + if (debug) { + if (body != null) + System.out.println("body: " + + new String(body)); + System.out.println("write complete, " + + responses + " responses received"); + } + res.freeBuffers(); + lock.notify(); + } + } + }); + } + + // send read requests + else 
{ + + final RPCResponse res = clients[client].get(uris[osd], loc, + cap, fileId, i); + res.setResponseListener(new RPCResponseListener() { + public void responseAvailable(RPCResponse response) { + synchronized (lock) { + + Object[] array = osdThroughputs.get(response + .getSpeedyRequest().getServer()); + if (array == null) { + array = new Object[] { new Long(0), + new Long(0) }; + osdThroughputs.put(response + .getSpeedyRequest().getServer(), + array); + } + try { + array[0] = new Long(((Long) array[0]) + .longValue() + + response.getBody().capacity()); + array[1] = System.nanoTime(); + } catch (Exception e) { + e.printStackTrace(); + System.exit(1); + } + + responses++; + + // byte[] body = response.getSpeedyRequest() + // .getResponseBody(); + + // ReusableBuffer buf = null; + // try { + // buf = response.getBody(); + // } catch (Exception e) { + // e.printStackTrace(); + // System.exit(1); + // } + // + // if (buf.capacity() != stripeSize * 1024) { + // System.err.println("wrong body length: " + // + buf.capacity()); + // System.exit(1); + // } + // + // if (buf.get() != (byte) 65) { + // System.err.println("invalid body content"); + // System.exit(1); + // } + + if (debug) + System.out.println("read complete, " + + responses + " responses received"); + + res.freeBuffers(); + lock.notify(); + } + } + }); + } + + if (burstSize == 0 || i % (uris.length * burstSize) == 0) { + synchronized (lock) { + while (i - responses > rwAhead) + lock.wait(); + } + } + + } catch (IOException exc) { + exc.printStackTrace(); + System.exit(1); + } + } + + synchronized (lock) { + while (responses < fileSize / stripeSize) + lock.wait(); + } + + long time = System.nanoTime() - t0; + time = time / 1000000; + System.out.println("time elapsed for reading/writing " + fileSize + + "kb in " + stripeSize + "kb stripes: " + time + "ms"); + System.out + .println(((fileSize) / (stripeSize * time / 1000)) + " ops/s"); + System.out.println((fileSize / ((float) time / 1000)) + " kb/s"); + 
System.out.println("throughput for OSDs:"); + for (InetSocketAddress osd : osdThroughputs.keySet()) { + Object[] array = osdThroughputs.get(osd); + double t = ((Long) array[1] - t0) / 1000000.0f; + long size = (Long) array[0] / 1000; + System.out + .println(osd + ": " + size / ((float) t / 1000) + " kb/s"); + } + + for (OSDClient client : clients) + client.shutdown(); + + System.out.println(BufferPool.getStatus()); + } +} \ No newline at end of file diff --git a/servers/src/org/xtreemfs/sandbox/ThroughputTest.properties b/servers/src/org/xtreemfs/sandbox/ThroughputTest.properties new file mode 100644 index 0000000000000000000000000000000000000000..41d2eaf82bc26139d661ace3a0050e84819e7084 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/ThroughputTest.properties @@ -0,0 +1,29 @@ +# file size in kb +fileSize=4194304 + +# stripe size in kb +stripeSize=1024 + +# number of clients +numClients=10 + +# minimum number of pending responses at which no more requests are sent +rwAhead=1000 + +# number of lines to send between two checks of rwAhead (0 = "send single requests") +burstSize=2 + +# debug flag +debug=true + +osd0=csr-pc24.zib.de:32640 +#osd0=172.24.103.160:32641 +#osd1=172.24.103.161:32641 +#osd2=172.24.103.162:32641 +#osd3=172.24.103.163:32641 +#osd4=172.24.103.164:32641 +#osd5=172.24.103.165:32641 +#osd6=172.24.103.166:32641 +#osd7=172.24.103.167:32641 +#osd8=172.24.103.168:32641 +#osd9=172.24.103.169:32641 \ No newline at end of file diff --git a/servers/src/org/xtreemfs/sandbox/benchmark/Common.java b/servers/src/org/xtreemfs/sandbox/benchmark/Common.java new file mode 100644 index 0000000000000000000000000000000000000000..101913cf771d7a0fe1686fa2c0b0744903d35a88 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/benchmark/Common.java @@ -0,0 +1,126 @@ +package org.xtreemfs.sandbox.benchmark; + +import java.io.BufferedWriter; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import 
java.io.OutputStreamWriter; +import java.sql.Date; +import java.sql.Time; +import java.text.DecimalFormat; +import java.util.LinkedList; +import java.util.Locale; +import java.util.Random; + +/** + * + * @author clorenz + */ +public class Common { + /** + * every dir has a "/" at the end + * @param dir + * @return + */ + public static String correctDir(String dir) { + if (!dir.endsWith("/")) { + dir = dir + "/"; + } + return dir; + } + + /** + * generates randomly filled byte-array + * + * @param length + * length of the byte-array + */ + public static byte[] generateRandomBytes(int length) { + Random r = new Random(15619681); + byte[] bytes = new byte[length]; + + r.nextBytes(bytes); + return bytes; + } + + /** + * generates randomly Filename + */ + public static String generateFileId(Random r) throws IllegalArgumentException { + String id = r.nextInt(100000000) + ":" + r.nextInt(1000000000); + return id; + } + + +/* public static void deepDirClean(String dir) { + File file = new File(dir); + if (!file.exists()) { + file.mkdir(); + } else { + for (File fileChild : file.listFiles()) { + if (fileChild.isDirectory()) { + deepDirClean(fileChild.getAbsolutePath()); + fileChild.delete(); + } else { + fileChild.delete(); + } + } + } + }*/ + + /** + * writes Results into a file (overwrites File, if exists) + * @param absFilename + * @param list + */ + public static void writeToFile(String absFilename, LinkedList list){ + BufferedWriter out = null; + try { + out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(absFilename, true))); + try { + for(String s : list){ + out.write(s + "\n"); + } + out.flush(); + } catch (IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } catch (FileNotFoundException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + }finally{ + if(out!=null) + try { + out.close(); + } catch (IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + } + + /** + * 
formats the input for result-file + * @param bench + * @param info + * @param time + * @return + */ + public static String formatResultForFile(String bench, String info, long time){ + return new Date(System.currentTimeMillis()) + " " + new Time(System.currentTimeMillis()) + "; " + bench + "; " + info + "; " + time; + } + + /** + * converts a Number with "." into "," + * @param n + * @return + */ + public static String formatNumberToComma(long n){ + DecimalFormat df = (DecimalFormat)DecimalFormat.getInstance(Locale.GERMAN); + df.applyPattern( "#,###,##0.00" ); + String s = df.format( n ); + return s; + } + +} diff --git a/servers/src/org/xtreemfs/sandbox/benchmark/NeedetTime.java b/servers/src/org/xtreemfs/sandbox/benchmark/NeedetTime.java new file mode 100644 index 0000000000000000000000000000000000000000..b7320f7c850100f46282feecda6e79551d5048b3 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/benchmark/NeedetTime.java @@ -0,0 +1,47 @@ +package org.xtreemfs.sandbox.benchmark; + +/** + * + * @author clorenz + */ +public class NeedetTime { + private static NeedetTime self = new NeedetTime(); + + private long start; + private long end; + private String name; + + /** + * fabric + * @return + */ + public static NeedetTime getNeedetTime(){ + return self; + } + + private NeedetTime() { + super(); + } + + public long start(String s){ + System.out.println("##### Begin: " + s); + this.name = s; + this.start = System.currentTimeMillis(); + return this.start; + } + + public long end(){ + this.end = System.currentTimeMillis(); + long time; + time = this.end-this.start; + System.out.println("##### End: " + this.name + " takes " + time + "ms (" + time/1000 + "s)"); + reset(); + return time; + } + + private void reset(){ + this.start = 0; + this.end = 0; + this.name = ""; + } +} diff --git a/servers/src/org/xtreemfs/sandbox/benchmark/OSDBenchmark.java b/servers/src/org/xtreemfs/sandbox/benchmark/OSDBenchmark.java new file mode 100644 index 
0000000000000000000000000000000000000000..65ff5558b787e421b9fe4c59481a70d18e477b96 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/benchmark/OSDBenchmark.java @@ -0,0 +1,219 @@ +package org.xtreemfs.sandbox.benchmark; + +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.Random; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.osd.OSDConfig; + +/** + * + * @author clorenz + */ +public abstract class OSDBenchmark { +// /* +// * needed for checking the results +// */ +// protected class TestRequestController implements RequestHandler, UDPCom { +// private OSDRequest lastRequest; +// +// /** dummy */ +// public OSDId getMe() { +// return new OSDId("localhost", 32636, OSDId.SCHEME_HTTP); +// } +// +// /** +// * blocks until a Request is received +// * +// * @return last received Request +// */ +// public synchronized OSDRequest getLastRequest() { +// if (lastRequest == null) { +// try { +// this.wait(); +// } catch (InterruptedException ex) { +// ex.printStackTrace(); +// } +// } +// OSDRequest ret = lastRequest; +// lastRequest = null; +// return ret; +// } +// +// public synchronized void stageCallback(OSDRequest request) { +// lastRequest = request; +// BufferPool.free(request.getRequest().requestBody); +// BufferPool.free(request.getRequest().responseBody); +// BufferPool.free(request.getRequest().responseHeaders); +// BufferPool.free(request.getData()); +// notify(); +// } +// +// +// public void sendUDP(ReusableBuffer data, InetSocketAddress receiver) { +// throw new 
UnsupportedOperationException("Not supported yet."); +// } +// +// public void receiveUDP(ReusableBuffer data, InetSocketAddress sender) { +// throw new UnsupportedOperationException("Not supported yet."); +// } +// +// public void sendInternalEvent(OSDRequest event) { +// +// } +// +// } +// +// private final OSDId OSDID = new OSDId("localhost", 32636, OSDId.SCHEME_HTTP); +// +// /** +// * the controller which gets the result-requests from stages +// */ +// protected TestRequestController controller; +// +// /** +// * the OSDConfig +// */ +// protected OSDConfig config; +// +// protected StripingPolicy sp; +// +// private Location loc; +// +// /** +// * root-path for benchmarks +// */ +// protected String testDir; +// +// /** +// * a Random for generating the same filenames every benchmark +// */ +// protected Random filenameRandom; +// +// protected OSDBenchmark(String testDir, StripingPolicy sp) throws IOException { +// Logging.start(Logging.LEVEL_WARN); +// +// config = createOSDConfig(testDir); +// controller = new TestRequestController(); +// +// this.testDir = testDir; +// filenameRandom = new Random(54684651); +// +// FSTools.delTree(new File(testDir)); +// +// this.sp = sp; +// List osd = new ArrayList(); +// osd.add(OSDID); +// loc = new Location(sp, osd); +// } +// +// /** +// * run before benchmark +// */ +// protected abstract void setUp(); +// +// /** +// * run after benchmark +// */ +// protected abstract void tearDown(); +// +// /** +// * setup a WriteRequest +// */ +// protected OSDRequest createWriteRequest(String fileId, int objNo, +// int version, int dataLength) throws IllegalArgumentException { +// OSDOperation op = new OSDOperation(OperationType.WRITE, +// OperationSubType.WHOLE); +// +// OSDRequest rq = new OSDRequest(new PinkyRequest()); +// // set the needed parameters +// rq.setOSDOperation(op); +// rq.setFileId(fileId); +// rq.setObjectNo(objNo); +// rq.setVersionNo(version); +// rq.setPolicy(sp); +// rq.setLocation(loc); +// 
rq.setCapability(new Capability(fileId, "write", "IAmTheClient",0)); +// +// byte[] bytes = Common.generateRandomBytes(dataLength); +// ReusableBuffer buf = BufferPool.allocate(dataLength); +// buf.put(bytes); +// rq.getRequest().requestBody = buf; +// rq.getRequest().requestBdyLength = buf.capacity(); +// return rq; +// } +// +// /** +// * setup a ReadRequest +// */ +// protected OSDRequest createReadRequest(String fileId, long objNo, +// int version) throws IllegalArgumentException { +// OSDRequest rq = new OSDRequest(new PinkyRequest()); +// // set the needed parameters +// OSDOperation op = new OSDOperation(OperationType.READ, +// OperationSubType.WHOLE); +// rq.setOSDOperation(op); +// +// rq.setFileId(fileId); +// rq.setObjectNo(objNo); +// rq.setVersionNo(version); +// rq.setPolicy(sp); +// rq.setLocation(loc); +// rq.setCapability(new Capability(fileId, "read", "IAmTheClient",0)); +// +// return rq; +// } +// +// /** +// * setup a DeleteRequest +// */ +// protected OSDRequest createDeleteRequest(String fileId, long objNo, +// int version) throws IllegalArgumentException { +// OSDRequest rq = new OSDRequest(new PinkyRequest()); +// +// // set the needed parameters +// OSDOperation op = new OSDOperation(OperationType.DELETE, +// OperationSubType.WHOLE); +// rq.setOSDOperation(op); +// +// rq.setFileId(fileId); +// rq.setObjectNo(objNo); +// rq.setVersionNo(version); +// rq.setPolicy(sp); +// rq.setCapability(new Capability(fileId, "delete", "IAmTheClient",0)); +// +// return rq; +// } +// +// /** +// * setup a OSDConfig +// * +// * @param testDir +// * @return +// */ +// private static OSDConfig createOSDConfig(String testDir) throws IOException { +// Properties props = new Properties(); +// props.setProperty("dir_service.host", "localhost"); +// props.setProperty("dir_service.port", "32638"); +// props.setProperty("object_dir", testDir); +// props.setProperty("debug_level", "" + Logging.LEVEL_WARN); +// props.setProperty("listen_port", "32637"); +// 
props.setProperty("local_clock_renewal", "1"); +// props.setProperty("remote_time_sync", "1"); +// +// return new OSDConfig(props); +// } + +} diff --git a/servers/src/org/xtreemfs/sandbox/benchmark/StorageStageBenchmark.java b/servers/src/org/xtreemfs/sandbox/benchmark/StorageStageBenchmark.java new file mode 100644 index 0000000000000000000000000000000000000000..90a1d3b9766843fe306155d5507ff8953003bcf3 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/benchmark/StorageStageBenchmark.java @@ -0,0 +1,249 @@ +package org.xtreemfs.sandbox.benchmark; + + +/** + * + * @author clorenz + * + */ +public class StorageStageBenchmark extends OSDBenchmark { +// private class FileInfo { +// private String fileId; +// +// private int objectAmount; +// +// private int objectsProcessed; +// +// public FileInfo(String fileId, int objAmount) { +// super(); +// this.fileId = fileId; +// if (objAmount != 0) +// this.objectAmount = objAmount; +// else +// this.objectAmount = 1; +// this.objectsProcessed = 0; +// } +// +// public String getFileId() { +// return fileId; +// } +// +// public int getObjectAmount() { +// return objectAmount; +// } +// +// public int getObjectsProcessed() { +// return objectsProcessed; +// } +// +// public void increaseObjectsProcessed() { +// this.objectsProcessed++; +// } +// +// public void resetObjectsProcessed() { +// this.objectsProcessed = 0; +// } +// } +// +// public static final int WRITE_BENCHMARK = 0; +// +// public static final int READ_BENCHMARK = 1; +// +// public static final int DELETE_BENCHMARK = 2; +// +// protected StorageStage stage; +// +// protected NeedetTime time; +// +// protected LinkedList output; +// +// private boolean randomOrder; +// +// private String benchConfig; +// +// private Random shuffleRandom = new Random(1389724); +// +// /** +// * Map contains Files for Requests FileID -> Amount of Objects +// */ +// protected LinkedList files; +// +// public StorageStageBenchmark(String testDir, boolean randomOrder) throws 
IOException { +// super(testDir, new RAID0(1, 1)); +// this.stage = new StorageStage(controller, controller, config, null); +// this.time = NeedetTime.getNeedetTime(); +// this.randomOrder = randomOrder; +// +// this.files = new LinkedList(); +// this.output = new LinkedList(); +// } +// +// @Override +// protected void setUp(){ +// try { +// System.out.println("tidy up the testdir"); +// FSTools.delTree(new File(testDir)); +// stage = new StorageStage(controller, controller, config, null); +// stage.start(); +// } catch (IOException ex) { +// return; +// } +// } +// +// @Override +// protected void tearDown() { +//// System.out.println("tidy up the testdir"); +//// FSTools.delTree(new File(testDir)); +// stage.shutdown(); +// stage = null; +// } +// +// private void createFileList(int fileAmount, int maxObjAmount) { +// System.out.println("create file-list"); +// Random objectsRandom = new Random(151684); +// +// String fileId; +// FileInfo file; +// for (int i = 0; i < fileAmount; i++) { +// fileId = Common.generateFileId(filenameRandom); +// file = new FileInfo(fileId, objectsRandom.nextInt(maxObjAmount)); +// files.add(file); +// } +// benchConfig = fileAmount + " files, max " + maxObjAmount +// + " objects, random order " + randomOrder; +// } +// +// /* +// * Benchmarks +// */ +// public void benchWrite() { +// System.out.println("generate write-requests"); +// bench(WRITE_BENCHMARK, "Write", benchConfig); +// } +// +// public void benchRead() { +// System.out.println("generate read-requests"); +// bench(READ_BENCHMARK, "Read", benchConfig); +// } +// +// public void benchDelete() { +// System.out.println("generate read-requests"); +// bench(DELETE_BENCHMARK, "Delete", benchConfig); +// } +// +// /** +// * run the requests and measures the time +// * +// * @param benchmark +// * @param info +// * @param random +// * TODO +// * @param requests +// */ +// protected void bench(int benchMode, String benchmark, String info) { +// OSDRequest request = null; +// long 
endTime = 0; +// Random objToReadInOneLoopRandom = new Random(168465); +// +// boolean minOneFileWithObjectsRemaining = true; +// int objNo, objectsToReadInOneLoop; +// // loop runs until all objects of all files has been processed +// while (minOneFileWithObjectsRemaining) { +// if (randomOrder) { +// Collections.shuffle(files, shuffleRandom); +// } +// +// time.start(benchmark); +// for (FileInfo file : files) { // loop for all files +// objectsToReadInOneLoop = objToReadInOneLoopRandom.nextInt((file +// .getObjectAmount() / 2) + 1); // min: 1 object; max: +// // 1/2 FileObjectAmount +// // loop runs until a certain number of objects has been +// // processed or all objects has been processed +// while (objectsToReadInOneLoop == 0 +// || file.getObjectsProcessed() <= file.getObjectAmount()) { +// objNo = file.getObjectsProcessed() + 1; +// +// request = createRequest(benchMode, file.getFileId(), objNo); +// stage.enqueueRequest(request); +// controller.getLastRequest(); +// +// file.increaseObjectsProcessed(); +// objectsToReadInOneLoop--; +// } +// if (file.getObjectsProcessed() <= file.getObjectAmount()) { +// minOneFileWithObjectsRemaining = true; +// } else { +// minOneFileWithObjectsRemaining = false; +// } +// } +// endTime += time.end(); +// } +// for (FileInfo file : files) { +// file.resetObjectsProcessed(); +// } +// +// output.add(Common.formatResultForFile(benchmark, info, endTime)); +// } +// +// private OSDRequest createRequest(int benchMode, String fileId, int objNo) { +// OSDRequest rq; +// switch (benchMode) { +// case 0: { +// int dataLength = 10; +// rq = createWriteRequest(fileId, objNo, 0, dataLength); +// break; +// } +// case 1: { +// rq = createReadRequest(fileId, objNo, 0); +// break; +// } +// case 2: { +// rq = createDeleteRequest(fileId, objNo, 0); +// break; +// } +// default: { +// rq = null; +// break; +// } +// } +// return rq; +// } +// +// public void startBenchmarks(int fileAmount, int maxObjectAmount) { +// setUp(); +// 
createFileList(fileAmount, maxObjectAmount); +// +// benchWrite(); +// benchRead(); +// // benchDelete(false); // doesn't work at the moment +// tearDown(); +// +// Common.writeToFile("StorageStageBenchmark" + "_results.csv", output); +// } +// +// /** +// * @param args +// */ +// public static void main(String[] args) { +// // TODO Auto-generated method stub +// try { +// if (args.length >= 3) { +// StorageStageBenchmark bench = new StorageStageBenchmark(args[0], +// true); +// bench.startBenchmarks(Integer.parseInt(args[1]), Integer +// .parseInt(args[2])); +// } else +// System.out +// .println("usage: java " +// + "\nhint: use the -Xmx option for more needed RAM (e.g.: -Xmx256m)"); +// } catch (NullPointerException e) { +// System.out +// .println("usage: java " +// + "\nhint: use the -Xmx option for more needed RAM (e.g.: -Xmx256m)"); +// } catch (IOException e) { +// e.printStackTrace(); +// } +// } + +} diff --git a/servers/src/org/xtreemfs/sandbox/compile-DirectIOReader.txt b/servers/src/org/xtreemfs/sandbox/compile-DirectIOReader.txt new file mode 100644 index 0000000000000000000000000000000000000000..0e970c6bb563ce8664afbbff429dbe0a3b57e066 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/compile-DirectIOReader.txt @@ -0,0 +1,9 @@ +To compile the native library for DirectIOReader, make sure that $JAVA_HOME +points to a JDK 1.6. Invoke the following command: + +%> gcc -o libreaddirect.so -shared -Wl,-soname,libreaddirect.so \ + > -I$JAVA_HOME/include -I$JAVA_HOME/include/linux org_xtreemfs_sandbox_DirectIOReader.c + +The output will be a shared library named "libreaddirect.so". Make sure that the property +"java.library.path" points to the directory where this library is located when starting +the Java VM, e.g. by adding the option "-Djava.library.path=". 
\ No newline at end of file diff --git a/servers/src/org/xtreemfs/sandbox/httperf/1000files_urls b/servers/src/org/xtreemfs/sandbox/httperf/1000files_urls new file mode 100644 index 0000000000000000000000000000000000000000..75df291086f0b28195917d5d350f370bef45b5f5 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/httperf/1000files_urls @@ -0,0 +1,4571 @@ +89188064:400686934&7&0 + 25773355:432246245&2&0 + 12019596:768955185&1&0 + 94917052:478894685&1&0 + 61460149:528663121&3&0 + 91464935:321828651&3&0 + 20573922:978527726&0&0 + 73570584:509185366&7&0 + 11736920:293374434&6&0 + 81181921:284731810&2&0 + 6942420:268694617&0&0 + 37991815:504720419&6&0 + 89695712:867781539&0&0 + 63739032:657829661&4&0 + 51615122:306136635&6&0 + 55837993:14495058&5&0 + 86157015:284717318&2&0 + 26214434:119220927&0&0 + 30357532:115565152&1&0 + 90508784:978748912&3&0 + 9661309:520108364&4&0 + 73570584:509185366&1&0 + 79077778:891358269&3&0 + 53187669:801736771&0&0 + 15143259:558664371&1&0 + 65617926:204033633&0&0 + 92316363:939734067&1&0 + 59355976:4039906&0&0 + 30029539:567591478&0&0 + 19606849:453493174&6&0 + 43827047:376424684&3&0 + 6740012:611323570&0&0 + 44598616:444589273&6&0 + 61328978:899602754&2&0 + 84998:65122273&0&0 + 61328978:899602754&0&0 + 60200852:979593575&4&0 + 49454891:478453187&3&0 + 99833532:500564025&2&0 + 47136963:959239562&3&0 + 70858651:854922152&0&0 + 36580982:275911708&1&0 + 14318176:591914522&1&0 + 43198439:662808461&2&0 + 6775183:346361619&0&0 + 63729697:265246888&4&0 + 18603837:202398774&2&0 + 61933829:8127087&5&0 + 56910010:652586984&5&0 + 41523163:330921150&1&0 + 36531221:465544958&8&0 + 93985935:867478104&3&0 + 3598178:774609763&0&0 + 30891037:703355240&0&0 + 9000299:189622129&1&0 + 87690282:479248800&4&0 + 76711123:726781890&1&0 + 43596961:420778248&0&0 + 97270881:91385809&1&0 + 80013650:102651723&0&0 + 24346272:848320174&6&0 + 37218570:381618017&0&0 + 88280032:964183211&4&0 + 88758911:732300092&0&0 + 63853711:880273408&1&0 + 81307853:323317653&4&0 + 
5438539:954887323&1&0 + 61328978:899602754&7&0 + 60399439:867611170&3&0 + 29676985:956475462&2&0 + 2507552:38350344&2&0 + 47510292:774149565&2&0 + 31213691:499131596&2&0 + 77701927:738963158&1&0 + 57562731:820469772&6&0 + 4002043:406702190&7&0 + 45325818:757845469&7&0 + 99815723:590977381&4&0 + 84742010:130106102&1&0 + 36531221:465544958&7&0 + 48066323:255817938&1&0 + 14981801:6984071&2&0 + 65744787:251216899&1&0 + 36531221:465544958&2&0 + 70434522:372193649&0&0 + 84965216:463802260&2&0 + 8686319:631235672&0&0 + 52469998:590911777&0&0 + 69030160:685229886&1&0 + 49398852:890280265&1&0 + 40977332:868266272&6&0 + 90861661:377164183&1&0 + 23904648:376661712&1&0 + 52867992:64445255&3&0 + 29876640:14470514&4&0 + 84965216:463802260&6&0 + 29465223:259197434&3&0 + 98961310:319784340&0&0 + 87846747:53527409&6&0 + 53412846:338117810&2&0 + 54552902:217188624&1&0 + 73545484:102151889&5&0 + 95470049:537113142&1&0 + 28875158:282914848&0&0 + 51499587:698927989&0&0 + 44090675:343089231&3&0 + 75974979:580480563&3&0 + 90075179:164776514&0&0 + 80442241:270231675&2&0 + 13940912:909214727&2&0 + 85058382:571466364&1&0 + 24346272:848320174&5&0 + 88280032:964183211&7&0 + 17565416:593803371&1&0 + 83452622:378063536&1&0 + 54127829:84354693&0&0 + 34172870:34623997&1&0 + 57886993:212586771&0&0 + 52515016:412430656&1&0 + 76752891:299820811&7&0 + 89633329:116921545&6&0 + 15600072:946000325&4&0 + 90861661:377164183&0&0 + 93397942:110230992&0&0 + 34839256:826322060&2&0 + 69000118:268012364&2&0 + 84834024:841200594&1&0 + 82913494:615212745&1&0 + 16765477:372594907&0&0 + 49297202:349828703&6&0 + 13693527:745147566&4&0 + 63729697:265246888&0&0 + 20733614:921257806&0&0 + 58627766:871327242&2&0 + 41711686:119124692&3&0 + 83452622:378063536&6&0 + 45670446:13274522&1&0 + 61342731:246236836&1&0 + 35390877:599176194&0&0 + 42503311:427261511&0&0 + 33882509:498489183&0&0 + 8371874:517008461&1&0 + 103672:805856166&2&0 + 68544258:743179312&3&0 + 99833532:500564025&1&0 + 794440:643333917&0&0 + 
53324128:609320531&3&0 + 95041258:745948755&4&0 + 20128250:722747907&0&0 + 51058391:273493813&4&0 + 82321822:968672172&1&0 + 40403702:309013849&7&0 + 94917052:478894685&0&0 + 88280032:964183211&2&0 + 50357:36778149&3&0 + 44598616:444589273&5&0 + 53808293:694574130&4&0 + 23472419:651429820&1&0 + 95181848:714820342&2&0 + 18876924:818582999&1&0 + 70289966:246764374&3&0 + 71407149:552668478&0&0 + 15600072:946000325&8&0 + 14377435:246795498&5&0 + 12494935:932668955&5&0 + 11138744:261195763&6&0 + 56122919:43052528&2&0 + 15801671:171539023&0&0 + 50602025:559678799&4&0 + 85671473:899588745&2&0 + 72275014:131214035&3&0 + 53337305:527156895&1&0 + 25285542:482312728&2&0 + 39496198:945080868&2&0 + 84896287:714382773&2&0 + 54288911:58962140&4&0 + 39436817:570472461&5&0 + 71013443:229044980&1&0 + 99986991:910170617&1&0 + 22437586:91331933&1&0 + 21012542:575718479&4&0 + 7248112:180850711&1&0 + 88970662:25547912&0&0 + 6986540:511897738&0&0 + 87953555:558848768&2&0 + 394890:702464926&2&0 + 62956693:863682188&1&0 + 80006895:539336678&1&0 + 58013677:80506859&0&0 + 97380991:698920611&2&0 + 58013677:80506859&3&0 + 984049:483901572&5&0 + 66989301:823245775&1&0 + 74789224:922087904&7&0 + 67003817:938806399&3&0 + 36531221:465544958&4&0 + 44449704:961196104&0&0 + 11736920:293374434&8&0 + 9546976:734325171&6&0 + 79670318:631438367&5&0 + 40403702:309013849&1&0 + 36475639:150298750&1&0 + 61933829:8127087&1&0 + 86157015:284717318&7&0 + 79645721:394251089&5&0 + 87443245:251462366&4&0 + 39268836:42129101&0&0 + 24378512:643438192&6&0 + 54051774:64602469&2&0 + 4002043:406702190&5&0 + 24832388:811203391&2&0 + 92388719:152303867&1&0 + 47335432:985984618&2&0 + 99842243:942096645&8&0 + 51932258:733052883&1&0 + 66272075:852533061&0&0 + 89831741:878651007&2&0 + 89188064:400686934&0&0 + 93121454:535538144&2&0 + 74789224:922087904&2&0 + 16140571:981617709&1&0 + 10912081:299073308&1&0 + 97270881:91385809&0&0 + 94427100:345106252&0&0 + 58013677:80506859&2&0 + 95101780:576073953&4&0 + 90326429:479392202&1&0 
+ 23038701:513125022&0&0 + 97762062:510060263&5&0 + 90508784:978748912&2&0 + 68343669:388708873&0&0 + 74812757:886509217&3&0 + 14468074:476007880&0&0 + 60064285:524769913&5&0 + 87077807:356406314&0&0 + 99794978:483568330&6&0 + 33493258:513726334&2&0 + 53412846:338117810&3&0 + 18134299:678983610&1&0 + 89188064:400686934&3&0 + 50357:36778149&1&0 + 13956173:536937059&2&0 + 51381452:363401409&2&0 + 3635379:230692518&2&0 + 30452630:493217909&5&0 + 88267404:152780511&2&0 + 43111670:582733005&0&0 + 12313712:542956627&8&0 + 68501344:633483542&2&0 + 91717739:892891474&2&0 + 10174974:120294830&3&0 + 54045147:80242617&6&0 + 79010859:477622599&2&0 + 98165980:787776889&3&0 + 53307310:462486717&1&0 + 24990550:701579152&3&0 + 53442228:374108430&4&0 + 60200852:979593575&2&0 + 10221086:708109010&0&0 + 18603837:202398774&6&0 + 67206790:870511274&7&0 + 98666811:206440428&0&0 + 49398852:890280265&2&0 + 61512395:268246479&6&0 + 49228676:90120193&5&0 + 85222108:614487320&3&0 + 15359195:473941356&1&0 + 61460149:528663121&6&0 + 25285542:482312728&3&0 + 30894037:75955676&0&0 + 86808504:337494125&0&0 + 12494935:932668955&2&0 + 93979834:814182899&1&0 + 38586569:857193927&5&0 + 8077408:486944454&1&0 + 93985935:867478104&0&0 + 16439035:301098169&0&0 + 63772983:913544890&5&0 + 9312867:605857668&0&0 + 55065825:57247164&1&0 + 20733614:921257806&3&0 + 36789634:662824796&3&0 + 90508784:978748912&1&0 + 49063958:681745900&4&0 + 54446911:376658051&2&0 + 37178590:132077611&1&0 + 36587580:228451807&2&0 + 24363729:931822610&4&0 + 24383850:838434080&1&0 + 40515987:99439735&1&0 + 14377435:246795498&0&0 + 12498479:749511858&2&0 + 8348663:925846920&0&0 + 45670446:13274522&0&0 + 56122919:43052528&4&0 + 4100953:608638082&5&0 + 94917052:478894685&3&0 + 64532124:355047058&4&0 + 36531221:465544958&6&0 + 96429072:241981909&1&0 + 93121454:535538144&1&0 + 57399441:574346626&1&0 + 67523238:697508437&4&0 + 17729199:152719649&0&0 + 13747057:28246965&4&0 + 63271831:810442761&0&0 + 12125866:427070376&2&0 + 
12779856:957438307&8&0 + 14468397:543502230&6&0 + 33882509:498489183&6&0 + 89503186:878988103&1&0 + 61132974:165535142&2&0 + 54877703:634079928&4&0 + 18737604:340823539&1&0 + 44598616:444589273&4&0 + 38388251:809020096&0&0 + 94812560:257774709&2&0 + 95634901:59063091&0&0 + 56141369:829312773&0&0 + 39600542:865492623&2&0 + 58627766:871327242&3&0 + 4002043:406702190&6&0 + 24097418:205489992&1&0 + 13679271:747757662&0&0 + 10198934:426173913&3&0 + 61678898:882739587&0&0 + 81974220:715744766&1&0 + 9320061:177336824&4&0 + 49235941:65252136&0&0 + 47695679:473571694&2&0 + 19704814:290603674&3&0 + 64038790:72175167&2&0 + 40063900:620687205&0&0 + 59025297:456814031&2&0 + 89567267:335873132&1&0 + 25354103:499890095&1&0 + 52476244:931167830&0&0 + 47528821:471052035&6&0 + 50974342:597095063&2&0 + 26214434:119220927&3&0 + 10211847:108678674&0&0 + 24172877:139409016&0&0 + 18603837:202398774&1&0 + 48447057:884746929&0&0 + 93493362:592468562&0&0 + 2052336:295787981&5&0 + 70858651:854922152&6&0 + 37991815:504720419&0&0 + 73545882:550927034&2&0 + 52754617:121414498&6&0 + 95089663:763144708&2&0 + 40063900:620687205&3&0 + 53337305:527156895&3&0 + 40010549:14555157&3&0 + 58627766:871327242&0&0 + 50974342:597095063&0&0 + 8478903:846023875&0&0 + 24990550:701579152&0&0 + 37471105:762756421&1&0 + 23061216:99434729&7&0 + 63905820:224098428&0&0 + 85733564:663876045&4&0 + 39496198:945080868&5&0 + 44953983:866115077&1&0 + 88758911:732300092&3&0 + 63739032:657829661&1&0 + 20728829:488225281&0&0 + 63689658:94830853&2&0 + 95203911:601016938&2&0 + 80722375:560147575&3&0 + 89778255:397729291&2&0 + 81210421:313085869&5&0 + 7940265:732052199&3&0 + 36475639:150298750&4&0 + 81320605:90473249&1&0 + 35034292:66740643&0&0 + 46808018:336582676&3&0 + 8570864:280829001&1&0 + 81307853:323317653&1&0 + 62044501:825326152&1&0 + 58736725:737810966&3&0 + 95623061:999526611&7&0 + 17095008:149761520&4&0 + 28107596:496741880&2&0 + 17307475:921652843&3&0 + 70668519:507904197&4&0 + 15143259:558664371&4&0 + 
33882509:498489183&2&0 + 81235131:446790486&0&0 + 19613859:704336702&0&0 + 34651805:97557080&1&0 + 58780166:97498602&1&0 + 95634901:59063091&1&0 + 54446911:376658051&1&0 + 68652349:438144715&5&0 + 53302195:957822333&5&0 + 30797047:115487974&2&0 + 84904995:767743372&2&0 + 11007403:300063068&1&0 + 52469998:590911777&1&0 + 35432003:282835475&1&0 + 5618809:197855825&2&0 + 2804255:949350001&4&0 + 74723689:42235917&1&0 + 52867992:64445255&4&0 + 42666123:240437475&0&0 + 99815723:590977381&6&0 + 91593406:852651754&1&0 + 20516273:952298447&0&0 + 3605565:131483740&3&0 + 78272570:576029226&4&0 + 52989796:367944781&0&0 + 43827047:376424684&4&0 + 2931488:604747700&1&0 + 6986540:511897738&3&0 + 52606765:416027911&0&0 + 51712898:317666207&0&0 + 49235941:65252136&1&0 + 26129164:375963359&7&0 + 50482922:317040887&3&0 + 52606765:416027911&1&0 + 90034775:766779686&0&0 + 19704814:290603674&2&0 + 33796722:672149182&6&0 + 43596961:420778248&2&0 + 80255325:237774223&1&0 + 64482738:623825544&3&0 + 81210421:313085869&1&0 + 90326429:479392202&0&0 + 14859108:184675170&0&0 + 49148247:892059898&4&0 + 43375304:628445689&4&0 + 82586644:976358841&1&0 + 23333846:575959031&1&0 + 86157015:284717318&4&0 + 81181921:284731810&4&0 + 77363664:153476039&0&0 + 29486221:352891995&1&0 + 39097823:700206507&6&0 + 49779369:62149871&1&0 + 47996608:352927170&6&0 + 96429072:241981909&0&0 + 37709609:639963160&1&0 + 86184757:353207740&0&0 + 80006895:539336678&2&0 + 11757315:383214052&0&0 + 86808504:337494125&3&0 + 53442228:374108430&6&0 + 72236572:366846956&2&0 + 24951969:864685429&4&0 + 68652349:438144715&3&0 + 89567267:335873132&2&0 + 43198439:662808461&8&0 + 25773355:432246245&4&0 + 92539348:968187617&3&0 + 98170290:192031081&0&0 + 63190338:539555088&6&0 + 54877703:634079928&2&0 + 20516273:952298447&6&0 + 63396630:287945650&1&0 + 26129164:375963359&3&0 + 58890455:598289383&0&0 + 43010149:109870747&2&0 + 26254724:683813336&0&0 + 63931155:270819007&6&0 + 61438049:706145983&2&0 + 63729697:265246888&6&0 + 
43135756:992124594&1&0 + 91127077:413824363&3&0 + 87690282:479248800&3&0 + 37218570:381618017&5&0 + 30484610:339944899&4&0 + 33407839:558615386&5&0 + 62344657:852184446&5&0 + 5618809:197855825&5&0 + 24378512:643438192&2&0 + 85733564:663876045&7&0 + 97792263:695076704&3&0 + 10221086:708109010&1&0 + 73871399:826196232&6&0 + 93650412:797925819&0&0 + 26927277:639887490&2&0 + 89778255:397729291&1&0 + 96502711:831176956&8&0 + 52867992:64445255&0&0 + 47912996:669660571&0&0 + 38385044:451635099&0&0 + 27051694:2092817&1&0 + 18822093:257648120&4&0 + 44953983:866115077&2&0 + 83393462:733714979&1&0 + 23038701:513125022&4&0 + 43010149:109870747&3&0 + 59285880:603762424&1&0 + 24172877:139409016&7&0 + 85829140:808603613&7&0 + 94427100:345106252&6&0 + 28196376:955082045&7&0 + 79670318:631438367&1&0 + 44485622:208425683&1&0 + 51499587:698927989&2&0 + 22313523:720752569&0&0 + 53442228:374108430&0&0 + 82631241:215531913&7&0 + 42923230:468988641&5&0 + 56868152:640567118&3&0 + 47342112:257386546&3&0 + 59761096:747786818&0&0 + 14404891:512198282&3&0 + 5835305:685442396&1&0 + 11849083:752814633&8&0 + 37991815:504720419&4&0 + 24831969:679139335&1&0 + 38828138:75574381&5&0 + 76271183:785907918&5&0 + 93886887:222120256&0&0 + 45957916:710921358&1&0 + 20516273:952298447&4&0 + 37703625:857959686&1&0 + 24378512:643438192&7&0 + 24383850:838434080&6&0 + 43198934:335023352&2&0 + 47136963:959239562&6&0 + 90481479:287890425&1&0 + 95017381:969896974&0&0 + 72424811:659534862&4&0 + 67003817:938806399&0&0 + 54552902:217188624&0&0 + 66989301:823245775&3&0 + 50128363:284850957&1&0 + 56056086:479286098&1&0 + 24098346:359515341&6&0 + 63772983:913544890&6&0 + 61627279:33833788&1&0 + 32714634:291460589&1&0 + 98807218:361607752&5&0 + 11224354:610593814&2&0 + 66272075:852533061&3&0 + 80114329:544018923&0&0 + 2052336:295787981&8&0 + 51246491:77865810&2&0 + 81490586:118219082&6&0 + 63905820:224098428&2&0 + 15912561:356560921&0&0 + 55837993:14495058&4&0 + 63167734:665819705&1&0 + 63853711:880273408&0&0 + 
49604511:248139084&0&0 + 48447057:884746929&2&0 + 23472419:651429820&0&0 + 45767543:834840580&0&0 + 36094705:71328450&4&0 + 11224354:610593814&4&0 + 96502711:831176956&6&0 + 25285542:482312728&4&0 + 6716617:307347947&4&0 + 81490586:118219082&2&0 + 51622589:556434188&4&0 + 39212191:907959215&0&0 + 63295000:105176636&0&0 + 77363664:153476039&1&0 + 29465223:259197434&0&0 + 26246486:906412836&5&0 + 8077408:486944454&4&0 + 73545484:102151889&3&0 + 72073917:875549213&3&0 + 394890:702464926&7&0 + 29911452:391223956&0&0 + 10912081:299073308&5&0 + 81046958:810133979&0&0 + 91938100:198737700&2&0 + 89503186:878988103&0&0 + 23590940:870866638&3&0 + 88554204:902099442&0&0 + 87314087:334750260&4&0 + 29465223:259197434&2&0 + 24383850:838434080&0&0 + 73570584:509185366&6&0 + 5835305:685442396&0&0 + 95623061:999526611&5&0 + 47528821:471052035&0&0 + 90034775:766779686&5&0 + 39738078:45613849&3&0 + 4002043:406702190&2&0 + 63931155:270819007&3&0 + 88280032:964183211&6&0 + 23333846:575959031&4&0 + 90786797:60875478&0&0 + 81490586:118219082&3&0 + 45957916:710921358&2&0 + 97762062:510060263&3&0 + 23590940:870866638&2&0 + 95017381:969896974&3&0 + 62467260:744386147&0&0 + 45423397:984954387&1&0 + 98101480:644789386&6&0 + 26138914:698492330&7&0 + 23719883:37149617&5&0 + 43111670:582733005&1&0 + 45767543:834840580&1&0 + 64532124:355047058&1&0 + 1812760:222467758&3&0 + 7235585:978931534&0&0 + 91464935:321828651&4&0 + 31055089:223681656&0&0 + 7504010:840190292&2&0 + 86808504:337494125&4&0 + 80715881:581362471&0&0 + 35995149:297934460&3&0 + 52604035:297082894&3&0 + 97270881:91385809&7&0 + 69000118:268012364&5&0 + 980829:856644254&2&0 + 56429317:209917005&2&0 + 90141696:521979907&1&0 + 26138914:698492330&6&0 + 77966981:649378990&0&0 + 4100953:608638082&0&0 + 87479706:36726780&3&0 + 13747057:28246965&8&0 + 25272781:737704078&3&0 + 31490945:628212703&0&0 + 52606765:416027911&3&0 + 64482738:623825544&2&0 + 80013650:102651723&6&0 + 31966866:496175072&4&0 + 86808504:337494125&1&0 + 
98356898:511632245&4&0 + 46996637:598107124&2&0 + 52392849:874508459&7&0 + 58013677:80506859&5&0 + 24362643:580293325&1&0 + 36531221:465544958&5&0 + 22313523:720752569&2&0 + 86930254:8171039&6&0 + 61472082:859670216&4&0 + 8703246:828088125&7&0 + 53442228:374108430&2&0 + 19706683:118758656&3&0 + 64015693:886848702&4&0 + 92539348:968187617&6&0 + 92539348:968187617&4&0 + 79010859:477622599&5&0 + 54877703:634079928&6&0 + 40443723:938389955&1&0 + 36647735:47858640&0&0 + 54127829:84354693&2&0 + 49154239:880099697&0&0 + 25285542:482312728&5&0 + 34172870:34623997&2&0 + 77966981:649378990&3&0 + 97184810:598826627&3&0 + 80715881:581362471&5&0 + 98807218:361607752&4&0 + 12613730:540446356&2&0 + 11224354:610593814&0&0 + 20365333:405729388&7&0 + 38385044:451635099&3&0 + 12537680:869375833&1&0 + 86184757:353207740&5&0 + 28581534:230540665&0&0 + 87846747:53527409&1&0 + 94415205:458274448&1&0 + 13446013:369194742&3&0 + 4081641:44037476&1&0 + 19606849:453493174&4&0 + 63190338:539555088&3&0 + 14377435:246795498&2&0 + 3598178:774609763&1&0 + 74176864:6819931&4&0 + 34669023:392314594&3&0 + 53184255:926137075&5&0 + 56122919:43052528&0&0 + 78272570:576029226&0&0 + 43596961:420778248&5&0 + 67003817:938806399&1&0 + 73545484:102151889&2&0 + 48218524:204265440&4&0 + 70734204:99428613&1&0 + 73871399:826196232&3&0 + 61460149:528663121&5&0 + 53184255:926137075&6&0 + 89633329:116921545&1&0 + 24176974:995328674&1&0 + 47528821:471052035&7&0 + 984049:483901572&0&0 + 29676985:956475462&1&0 + 52989796:367944781&3&0 + 15819909:459944650&1&0 + 88080713:672772877&4&0 + 5824912:308260858&3&0 + 52754617:121414498&3&0 + 94628364:116475216&8&0 + 63271831:810442761&6&0 + 80722375:560147575&8&0 + 94493667:576440991&2&0 + 52604035:297082894&5&0 + 60200852:979593575&3&0 + 88280032:964183211&1&0 + 60811505:393000005&0&0 + 57399441:574346626&4&0 + 91975281:465006911&2&0 + 53324128:609320531&6&0 + 30313579:821675567&1&0 + 20516273:952298447&1&0 + 18071841:977748881&1&0 + 81365760:76386875&1&0 + 
39344453:2844854&3&0 + 53307310:462486717&4&0 + 45889263:861295166&3&0 + 98628510:519132078&5&0 + 96695706:704933404&0&0 + 52515016:412430656&8&0 + 32683211:278821282&7&0 + 22449547:21814022&1&0 + 7598529:92723566&6&0 + 14981801:6984071&0&0 + 91938100:198737700&3&0 + 34172870:34623997&4&0 + 34051254:146783427&7&0 + 75974979:580480563&0&0 + 98165980:787776889&2&0 + 51246491:77865810&1&0 + 73545484:102151889&1&0 + 40201271:222337229&1&0 + 52392849:874508459&4&0 + 39395160:218721735&6&0 + 53987158:596068559&0&0 + 49586042:371895777&1&0 + 89771981:701416033&1&0 + 20128250:722747907&5&0 + 76735123:941678155&0&0 + 34415669:157985628&4&0 + 90481479:287890425&4&0 + 80715881:581362471&8&0 + 24951969:864685429&0&0 + 80013650:102651723&4&0 + 7857675:28742616&8&0 + 17011630:240212798&3&0 + 89567267:335873132&4&0 + 14377435:246795498&7&0 + 10198934:426173913&4&0 + 46808018:336582676&4&0 + 85995297:256528459&0&0 + 56355057:984298232&5&0 + 82321678:269000813&5&0 + 10489572:102857792&0&0 + 73545484:102151889&0&0 + 5979461:117921763&5&0 + 87709874:450837682&1&0 + 55391319:856517156&0&0 + 21088777:580911428&1&0 + 51712898:317666207&2&0 + 52989796:367944781&1&0 + 53412846:338117810&5&0 + 54051774:64602469&3&0 + 52604035:297082894&4&0 + 58116832:472277749&1&0 + 9958572:479315549&0&0 + 10912081:299073308&4&0 + 49063958:681745900&6&0 + 12489037:588374360&1&0 + 56184893:691229767&3&0 + 94628364:116475216&6&0 + 3598178:774609763&2&0 + 30029539:567591478&2&0 + 65095917:343472350&7&0 + 36580982:275911708&0&0 + 89633329:116921545&4&0 + 97106732:300324714&3&0 + 85222108:614487320&1&0 + 31490945:628212703&3&0 + 97771363:538876948&0&0 + 50406025:420292395&0&0 + 3473124:197367451&2&0 + 18737604:340823539&3&0 + 50332413:946028543&1&0 + 26207377:652343612&0&0 + 37142307:404379547&3&0 + 53307310:462486717&5&0 + 53307310:462486717&3&0 + 79670318:631438367&2&0 + 69855691:594661188&2&0 + 72236572:366846956&6&0 + 58013677:80506859&1&0 + 7449595:999493580&6&0 + 83950070:415419733&4&0 + 
49235941:65252136&3&0 + 53442228:374108430&8&0 + 73042265:446129524&1&0 + 50602025:559678799&2&0 + 4361610:538004056&1&0 + 15143259:558664371&5&0 + 76711123:726781890&3&0 + 41381137:711848921&4&0 + 98101480:644789386&3&0 + 42984731:930599485&3&0 + 38526768:604092377&7&0 + 74723689:42235917&2&0 + 39528031:852171322&0&0 + 13956173:536937059&4&0 + 98666811:206440428&4&0 + 34669023:392314594&0&0 + 51615122:306136635&3&0 + 49228676:90120193&7&0 + 53337305:527156895&5&0 + 67473173:282206995&4&0 + 95623061:999526611&6&0 + 65285717:244145031&1&0 + 15600072:946000325&6&0 + 17307475:921652843&2&0 + 23590940:870866638&1&0 + 49691185:843161575&0&0 + 87443245:251462366&7&0 + 7540821:697278098&2&0 + 91717739:892891474&3&0 + 47996608:352927170&5&0 + 59958151:354869725&1&0 + 64836378:62569296&0&0 + 25354103:499890095&3&0 + 47695679:473571694&4&0 + 91593406:852651754&0&0 + 24176974:995328674&4&0 + 64836378:62569296&3&0 + 26246486:906412836&4&0 + 7857675:28742616&7&0 + 53087924:447531474&0&0 + 86930254:8171039&2&0 + 603534:903878232&4&0 + 59322751:797251769&3&0 + 46789695:572165705&1&0 + 68652349:438144715&2&0 + 23038701:513125022&7&0 + 76735123:941678155&2&0 + 30520570:686353374&1&0 + 72311045:525053734&1&0 + 24346272:848320174&2&0 + 91612396:751983860&2&0 + 30520570:686353374&4&0 + 37142307:404379547&4&0 + 76735123:941678155&5&0 + 18134299:678983610&3&0 + 76661806:9321473&0&0 + 55523564:759342342&4&0 + 90861661:377164183&5&0 + 77608268:998904048&1&0 + 95089663:763144708&3&0 + 18925324:658192454&0&0 + 14451740:824520616&6&0 + 93121454:535538144&0&0 + 39268836:42129101&1&0 + 43596961:420778248&6&0 + 36198277:46533019&3&0 + 1173145:804017165&1&0 + 50602025:559678799&0&0 + 81756244:76609808&4&0 + 12498479:749511858&6&0 + 32683211:278821282&4&0 + 63396630:287945650&2&0 + 88267404:152780511&3&0 + 67791119:50931034&3&0 + 5824912:308260858&1&0 + 40403702:309013849&3&0 + 61328978:899602754&1&0 + 99815723:590977381&8&0 + 49297202:349828703&0&0 + 51712898:317666207&1&0 + 
24831969:679139335&0&0 + 4361610:538004056&2&0 + 31289263:569563214&3&0 + 98376692:994240422&1&0 + 83950070:415419733&8&0 + 49454891:478453187&0&0 + 43582325:124694496&5&0 + 92388719:152303867&5&0 + 6378094:493422886&5&0 + 29876640:14470514&3&0 + 7940265:732052199&2&0 + 30764731:669089127&0&0 + 90536414:547747038&0&0 + 7235585:978931534&1&0 + 394890:702464926&6&0 + 89567267:335873132&5&0 + 17565416:593803371&3&0 + 93565100:601725646&0&0 + 62053396:88696472&4&0 + 69000118:268012364&0&0 + 51058391:273493813&0&0 + 6942420:268694617&3&0 + 43010149:109870747&1&0 + 79010859:477622599&0&0 + 67651365:715596256&1&0 + 46808018:336582676&7&0 + 18822093:257648120&1&0 + 2895362:274278227&0&0 + 72424811:659534862&7&0 + 26040231:635673194&1&0 + 88267404:152780511&5&0 + 47912996:669660571&1&0 + 92909646:3930968&2&0 + 39268836:42129101&4&0 + 54877703:634079928&3&0 + 27178729:695470776&2&0 + 11138744:261195763&4&0 + 79077778:891358269&0&0 + 53184255:926137075&0&0 + 70408006:711179145&3&0 + 39496198:945080868&7&0 + 39436817:570472461&1&0 + 43198439:662808461&5&0 + 72424811:659534862&2&0 + 87690282:479248800&0&0 + 49604511:248139084&6&0 + 25332707:717566292&4&0 + 53442228:374108430&1&0 + 71440934:544568790&1&0 + 91464935:321828651&5&0 + 71198215:541233157&7&0 + 13747057:28246965&2&0 + 49691185:843161575&1&0 + 54714557:119789008&2&0 + 34051254:146783427&3&0 + 72063454:558851416&6&0 + 82321678:269000813&4&0 + 86930254:8171039&5&0 + 30520570:686353374&0&0 + 81320605:90473249&6&0 + 35034292:66740643&1&0 + 31044545:3085923&2&0 + 73570584:509185366&4&0 + 60399439:867611170&4&0 + 58780166:97498602&3&0 + 25918106:961366538&2&0 + 86752384:691090977&0&0 + 17011630:240212798&4&0 + 62344657:852184446&1&0 + 29676985:956475462&6&0 + 14377435:246795498&4&0 + 82126941:34276294&0&0 + 87314087:334750260&2&0 + 97792263:695076704&4&0 + 55065825:57247164&0&0 + 89778255:397729291&6&0 + 10718440:862268826&5&0 + 61627279:33833788&0&0 + 90536414:547747038&3&0 + 45325818:757845469&6&0 + 54552902:217188624&3&0 
+ 49154239:880099697&2&0 + 33493258:513726334&4&0 + 63190338:539555088&4&0 + 3510889:744096151&0&0 + 73042265:446129524&0&0 + 21012542:575718479&2&0 + 82292279:521629961&1&0 + 89357225:96714883&3&0 + 85671473:899588745&3&0 + 60064285:524769913&2&0 + 30894037:75955676&2&0 + 54446911:376658051&3&0 + 25272781:737704078&1&0 + 38586569:857193927&0&0 + 68544258:743179312&1&0 + 67473173:282206995&2&0 + 99794978:483568330&5&0 + 35034292:66740643&2&0 + 8703246:828088125&3&0 + 61460149:528663121&2&0 + 74529648:589538823&2&0 + 78134554:409055510&0&0 + 3532053:557574456&2&0 + 81974220:715744766&6&0 + 87706904:194426816&0&0 + 80722375:560147575&2&0 + 57399441:574346626&0&0 + 50357:36778149&0&0 + 28481574:481670154&0&0 + 27170244:552163830&0&0 + 43198723:303923997&0&0 + 49148247:892059898&5&0 + 24172877:139409016&6&0 + 24176974:995328674&8&0 + 87314087:334750260&7&0 + 25272781:737704078&5&0 + 81210421:313085869&3&0 + 87863917:656602628&1&0 + 24097418:205489992&0&0 + 20826439:301881016&0&0 + 53987158:596068559&1&0 + 55523564:759342342&1&0 + 3635379:230692518&3&0 + 99842243:942096645&5&0 + 45325818:757845469&3&0 + 55910578:615174414&0&0 + 92305231:289072305&0&0 + 47757850:199707874&1&0 + 11849083:752814633&4&0 + 64836378:62569296&2&0 + 86808504:337494125&6&0 + 53029602:150785718&1&0 + 31490945:628212703&2&0 + 56222762:962632857&1&0 + 39811033:979896270&2&0 + 25773355:432246245&1&0 + 56618482:86658284&0&0 + 10221086:708109010&2&0 + 14468397:543502230&1&0 + 34839256:826322060&0&0 + 56429317:209917005&0&0 + 6378094:493422886&3&0 + 74812757:886509217&6&0 + 26138914:698492330&2&0 + 63190338:539555088&2&0 + 11138744:261195763&5&0 + 31044545:3085923&0&0 + 30357532:115565152&3&0 + 44485622:208425683&2&0 + 42984731:930599485&0&0 + 8703246:828088125&4&0 + 96071626:992715291&3&0 + 40403702:309013849&5&0 + 80952875:582053915&0&0 + 67791119:50931034&5&0 + 39395160:218721735&5&0 + 62416945:197441685&3&0 + 74812757:886509217&1&0 + 94628364:116475216&1&0 + 44953983:866115077&6&0 + 
2931488:604747700&0&0 + 25773355:432246245&5&0 + 83950070:415419733&2&0 + 61678898:882739587&1&0 + 13747057:28246965&7&0 + 47528821:471052035&5&0 + 67523238:697508437&1&0 + 1476651:489417849&2&0 + 83950070:415419733&3&0 + 54045147:80242617&4&0 + 69109134:798600716&2&0 + 74529648:589538823&3&0 + 95623061:999526611&4&0 + 63271831:810442761&4&0 + 76683504:74173528&0&0 + 11849083:752814633&3&0 + 4100953:608638082&6&0 + 34651805:97557080&4&0 + 70408006:711179145&5&0 + 89695712:867781539&3&0 + 26138914:698492330&4&0 + 2613166:672964070&2&0 + 59025297:456814031&0&0 + 15912561:356560921&2&0 + 66630681:350186035&3&0 + 44485622:208425683&5&0 + 37703625:857959686&4&0 + 84396847:258564327&1&0 + 10262184:160718981&0&0 + 39344453:2844854&0&0 + 65362834:380490486&7&0 + 984049:483901572&4&0 + 36586454:785680337&4&0 + 30452630:493217909&1&0 + 79645721:394251089&3&0 + 85829140:808603613&3&0 + 44782292:24646573&3&0 + 73570584:509185366&2&0 + 5590704:242473685&0&0 + 24362643:580293325&3&0 + 61342731:246236836&0&0 + 6378094:493422886&7&0 + 11558628:887403567&3&0 + 56056086:479286098&4&0 + 82631241:215531913&5&0 + 5618809:197855825&3&0 + 66989301:823245775&0&0 + 26801831:876221698&4&0 + 49240476:669690989&2&0 + 40201271:222337229&2&0 + 97380991:698920611&3&0 + 2613166:672964070&1&0 + 93758746:858902746&3&0 + 85829140:808603613&1&0 + 14468397:543502230&3&0 + 2052336:295787981&6&0 + 12489037:588374360&0&0 + 82762712:894811689&5&0 + 81210421:313085869&0&0 + 80715881:581362471&2&0 + 13956173:536937059&1&0 + 74812757:886509217&2&0 + 35995149:297934460&2&0 + 62416945:197441685&0&0 + 23904648:376661712&0&0 + 91717739:892891474&0&0 + 34051254:146783427&4&0 + 24176974:995328674&3&0 + 66253237:965950612&0&0 + 10912081:299073308&8&0 + 50332413:946028543&0&0 + 31213691:499131596&1&0 + 23038701:513125022&3&0 + 91604138:549050785&0&0 + 56524920:276851159&3&0 + 98101480:644789386&5&0 + 39738078:45613849&5&0 + 49148247:892059898&2&0 + 89633329:116921545&3&0 + 96502711:831176956&0&0 + 
13678402:447525740&4&0 + 103672:805856166&1&0 + 78330832:171496180&0&0 + 30520570:686353374&2&0 + 15600072:946000325&7&0 + 61342731:246236836&6&0 + 63607488:414440767&2&0 + 46808018:336582676&6&0 + 72311045:525053734&3&0 + 99815723:590977381&1&0 + 99815723:590977381&0&0 + 25928233:208833681&0&0 + 42984731:930599485&5&0 + 11849083:752814633&2&0 + 40515987:99439735&0&0 + 94427100:345106252&5&0 + 26246486:906412836&6&0 + 3532053:557574456&1&0 + 41711686:119124692&1&0 + 86752384:691090977&2&0 + 65095917:343472350&4&0 + 58736725:737810966&1&0 + 38586569:857193927&4&0 + 63607488:414440767&5&0 + 71198215:541233157&6&0 + 42984731:930599485&2&0 + 47510292:774149565&0&0 + 79126099:855166627&4&0 + 94643809:143128836&0&0 + 79126099:855166627&0&0 + 26214434:119220927&4&0 + 53412846:338117810&4&0 + 16439035:301098169&4&0 + 39436817:570472461&2&0 + 82321822:968672172&3&0 + 99794978:483568330&4&0 + 9546976:734325171&0&0 + 47528821:471052035&1&0 + 4100953:608638082&3&0 + 34415669:157985628&0&0 + 43010149:109870747&4&0 + 6378094:493422886&1&0 + 5979461:117921763&6&0 + 65095917:343472350&6&0 + 72044029:940353344&3&0 + 52392849:874508459&1&0 + 8429722:117818296&2&0 + 30452630:493217909&0&0 + 80013650:102651723&2&0 + 14377435:246795498&1&0 + 8478903:846023875&1&0 + 37112930:282552949&1&0 + 68498691:682911916&0&0 + 13940912:909214727&6&0 + 43538946:567733508&0&0 + 49604511:248139084&4&0 + 22449547:21814022&0&0 + 37218570:381618017&7&0 + 10912081:299073308&0&0 + 6775183:346361619&2&0 + 46789695:572165705&6&0 + 81320605:90473249&7&0 + 58116832:472277749&0&0 + 80653443:480708149&0&0 + 93758746:858902746&0&0 + 64015693:886848702&5&0 + 97106732:300324714&0&0 + 57906673:134380185&2&0 + 49297202:349828703&7&0 + 18582816:444495921&0&0 + 28481574:481670154&1&0 + 57562731:820469772&7&0 + 79670318:631438367&8&0 + 38586569:857193927&2&0 + 34651805:97557080&2&0 + 86184757:353207740&7&0 + 80255325:237774223&2&0 + 49297202:349828703&5&0 + 1476651:489417849&3&0 + 14377435:246795498&3&0 + 
70408006:711179145&4&0 + 66272075:852533061&4&0 + 75974979:580480563&1&0 + 98101480:644789386&7&0 + 86930254:8171039&0&0 + 53808293:694574130&1&0 + 50128363:284850957&4&0 + 30452630:493217909&3&0 + 54045147:80242617&0&0 + 47109679:5652390&7&0 + 35432003:282835475&4&0 + 22084553:161228022&2&0 + 71198215:541233157&3&0 + 93830543:355644973&5&0 + 53324128:609320531&0&0 + 92388719:152303867&0&0 + 59400525:37380364&4&0 + 15143259:558664371&2&0 + 56931252:531726666&0&0 + 27377446:716188923&3&0 + 90481479:287890425&3&0 + 56618482:86658284&3&0 + 83224862:719102167&3&0 + 81365760:76386875&5&0 + 27491562:354199743&2&0 + 97106732:300324714&4&0 + 77408000:80794786&3&0 + 6378094:493422886&8&0 + 45767543:834840580&5&0 + 67523238:697508437&3&0 + 73267864:103518361&1&0 + 49240476:669690989&8&0 + 14509050:708282746&1&0 + 98101480:644789386&8&0 + 73570584:509185366&8&0 + 33407839:558615386&0&0 + 10468585:807596921&0&0 + 84896287:714382773&0&0 + 59761096:747786818&7&0 + 49063958:681745900&3&0 + 73183888:803569686&1&0 + 84647254:861531665&5&0 + 47771961:238328621&4&0 + 53302195:957822333&3&0 + 45362101:919291375&0&0 + 65282844:748512676&0&0 + 94628364:116475216&2&0 + 20516273:952298447&3&0 + 99833532:500564025&0&0 + 23038701:513125022&5&0 + 44228044:620251323&1&0 + 97270881:91385809&6&0 + 49240476:669690989&7&0 + 95623061:999526611&3&0 + 72423404:42352706&0&0 + 37991815:504720419&3&0 + 93121454:535538144&3&0 + 24362643:580293325&5&0 + 47109679:5652390&0&0 + 90075179:164776514&1&0 + 52469998:590911777&4&0 + 40201271:222337229&0&0 + 59850985:154711861&1&0 + 91717739:892891474&6&0 + 29676985:956475462&0&0 + 43375304:628445689&6&0 + 98868935:587384485&0&0 + 12313712:542956627&5&0 + 73936828:572258359&1&0 + 87846747:53527409&5&0 + 54045147:80242617&2&0 + 72275014:131214035&0&0 + 26138914:698492330&0&0 + 4361610:538004056&0&0 + 75009229:50085413&2&0 + 44881433:220990270&5&0 + 29486221:352891995&4&0 + 33513632:650694600&0&0 + 99815723:590977381&3&0 + 12779856:957438307&1&0 + 
75974979:580480563&2&0 + 13678402:447525740&1&0 + 7449595:999493580&1&0 + 30894037:75955676&4&0 + 78086494:834446809&0&0 + 53412846:338117810&0&0 + 10973372:252564849&3&0 + 13940912:909214727&3&0 + 38862054:672901032&0&0 + 394890:702464926&1&0 + 13956173:536937059&3&0 + 43827047:376424684&1&0 + 47109679:5652390&4&0 + 79284588:443146164&7&0 + 50831868:929132209&5&0 + 98124860:878043876&7&0 + 62256323:763647343&4&0 + 63689658:94830853&1&0 + 4506699:932037770&1&0 + 13678402:447525740&3&0 + 83950070:415419733&0&0 + 52867992:64445255&2&0 + 56222762:962632857&4&0 + 20365333:405729388&8&0 + 25332707:717566292&3&0 + 40977332:868266272&4&0 + 65362834:380490486&2&0 + 44782292:24646573&2&0 + 47695679:473571694&7&0 + 32118984:867936417&7&0 + 89961816:386494093&1&0 + 40515987:99439735&8&0 + 73443745:72138644&2&0 + 3532053:557574456&6&0 + 55837993:14495058&7&0 + 31289263:569563214&1&0 + 55837993:14495058&2&0 + 7598529:92723566&3&0 + 96275276:134838908&2&0 + 28481574:481670154&7&0 + 9546976:734325171&3&0 + 85995297:256528459&5&0 + 72834250:466779877&0&0 + 87846747:53527409&2&0 + 25332707:717566292&0&0 + 82631241:215531913&1&0 + 63772983:913544890&2&0 + 10973372:252564849&1&0 + 79670318:631438367&3&0 + 5618809:197855825&4&0 + 33493258:513726334&3&0 + 6716617:307347947&2&0 + 51622589:556434188&6&0 + 72073917:875549213&0&0 + 96502711:831176956&4&0 + 27417896:531683550&8&0 + 83022428:944078824&3&0 + 70858651:854922152&1&0 + 18822093:257648120&3&0 + 30484610:339944899&6&0 + 13678402:447525740&8&0 + 52469998:590911777&6&0 + 64015693:886848702&8&0 + 83705003:798441379&3&0 + 36999708:558428301&0&0 + 63190338:539555088&0&0 + 54288911:58962140&7&0 + 25918106:961366538&5&0 + 4002043:406702190&4&0 + 12537680:869375833&2&0 + 79284588:443146164&2&0 + 15912561:356560921&1&0 + 38526768:604092377&3&0 + 43375304:628445689&2&0 + 80952875:582053915&4&0 + 30797047:115487974&5&0 + 22066498:596848252&0&0 + 77608268:998904048&0&0 + 8703246:828088125&2&0 + 71772628:879293175&3&0 + 53087924:447531474&4&0 
+ 17095008:149761520&3&0 + 15399094:948770580&2&0 + 94276106:262453525&0&0 + 603534:903878232&3&0 + 59025297:456814031&5&0 + 83022428:944078824&0&0 + 24378512:643438192&8&0 + 98970477:148387964&0&0 + 87443245:251462366&0&0 + 3510889:744096151&1&0 + 83393462:733714979&0&0 + 16946277:290208549&0&0 + 21088777:580911428&4&0 + 79670318:631438367&6&0 + 56184893:691229767&4&0 + 73871399:826196232&5&0 + 96502711:831176956&1&0 + 7256242:251275077&0&0 + 68501344:633483542&1&0 + 80722375:560147575&7&0 + 94055752:200828895&2&0 + 88267404:152780511&0&0 + 64836378:62569296&1&0 + 11736920:293374434&1&0 + 81756244:76609808&5&0 + 13940912:909214727&0&0 + 69030160:685229886&2&0 + 44449704:961196104&3&0 + 35121596:578268086&1&0 + 10973372:252564849&0&0 + 18134299:678983610&0&0 + 50406025:420292395&1&0 + 49398852:890280265&4&0 + 63607488:414440767&4&0 + 10211847:108678674&4&0 + 7857675:28742616&1&0 + 54480971:66664018&1&0 + 52979381:920137846&0&0 + 63396630:287945650&0&0 + 63931155:270819007&1&0 + 58780166:97498602&5&0 + 53184255:926137075&4&0 + 44228044:620251323&0&0 + 38828138:75574381&6&0 + 15143259:558664371&0&0 + 15912561:356560921&5&0 + 40403702:309013849&6&0 + 78330832:171496180&4&0 + 95203911:601016938&1&0 + 78086494:834446809&5&0 + 16436965:771028762&0&0 + 17172949:95409758&7&0 + 47996608:352927170&3&0 + 56868152:640567118&0&0 + 36586454:785680337&2&0 + 11558628:887403567&4&0 + 36222496:482298410&5&0 + 36999708:558428301&4&0 + 81756244:76609808&7&0 + 26040231:635673194&0&0 + 69109134:798600716&3&0 + 60064285:524769913&3&0 + 95041258:745948755&3&0 + 96275276:134838908&3&0 + 20365333:405729388&0&0 + 7449595:999493580&3&0 + 84834024:841200594&2&0 + 17172949:95409758&1&0 + 66272075:852533061&5&0 + 67651365:715596256&3&0 + 49454891:478453187&1&0 + 24097418:205489992&5&0 + 56618482:86658284&4&0 + 27377446:716188923&0&0 + 3473124:197367451&1&0 + 42209307:442034417&2&0 + 56868152:640567118&4&0 + 10211847:108678674&1&0 + 95200174:697000883&4&0 + 79126099:855166627&3&0 + 
81476270:156567648&2&0 + 98970477:148387964&1&0 + 54446911:376658051&4&0 + 82292279:521629961&0&0 + 61438049:706145983&0&0 + 49297202:349828703&4&0 + 13446013:369194742&5&0 + 40403702:309013849&4&0 + 72423404:42352706&1&0 + 39395160:218721735&4&0 + 59850985:154711861&3&0 + 26129164:375963359&0&0 + 59761096:747786818&6&0 + 89633329:116921545&5&0 + 80442241:270231675&5&0 + 2478110:466744145&0&0 + 94427100:345106252&4&0 + 14859108:184675170&4&0 + 24831969:679139335&2&0 + 84396847:258564327&0&0 + 90508784:978748912&6&0 + 45423397:984954387&0&0 + 56524920:276851159&6&0 + 63729697:265246888&1&0 + 98124860:878043876&6&0 + 17172949:95409758&0&0 + 72073917:875549213&4&0 + 69000118:268012364&7&0 + 17011630:240212798&5&0 + 53324128:609320531&4&0 + 84647254:861531665&3&0 + 79284588:443146164&3&0 + 38828138:75574381&3&0 + 26774964:953585124&3&0 + 31044545:3085923&3&0 + 80952875:582053915&6&0 + 98124860:878043876&5&0 + 63853711:880273408&3&0 + 39344453:2844854&6&0 + 64738196:397211282&2&0 + 71198215:541233157&2&0 + 71407149:552668478&2&0 + 1476651:489417849&4&0 + 63190338:539555088&1&0 + 71198215:541233157&4&0 + 93565100:601725646&2&0 + 7504010:840190292&0&0 + 51622589:556434188&7&0 + 95200174:697000883&2&0 + 90508784:978748912&8&0 + 93887629:500769389&5&0 + 53184255:926137075&3&0 + 90481479:287890425&6&0 + 85671473:899588745&0&0 + 62256323:763647343&2&0 + 72275014:131214035&1&0 + 94276620:842637751&2&0 + 97380991:698920611&5&0 + 36653790:724513128&0&0 + 94276106:262453525&3&0 + 24172877:139409016&4&0 + 82631241:215531913&4&0 + 60080178:449210772&3&0 + 84827871:935078132&1&0 + 9958572:479315549&2&0 + 86157015:284717318&0&0 + 92316363:939734067&3&0 + 67651365:715596256&4&0 + 78086494:834446809&1&0 + 43582325:124694496&1&0 + 72311045:525053734&5&0 + 89357225:96714883&1&0 + 82762712:894811689&6&0 + 37991815:504720419&7&0 + 34310096:11542324&2&0 + 67473173:282206995&0&0 + 88080713:672772877&3&0 + 49148247:892059898&8&0 + 61008441:281928562&1&0 + 63190338:539555088&8&0 + 
56056086:479286098&3&0 + 2804255:949350001&1&0 + 46324975:493014007&0&0 + 13678402:447525740&5&0 + 61472082:859670216&3&0 + 90141696:521979907&2&0 + 13678402:447525740&0&0 + 20128250:722747907&3&0 + 54714557:119789008&3&0 + 37703625:857959686&3&0 + 51622589:556434188&1&0 + 14404891:512198282&1&0 + 63689658:94830853&3&0 + 33513632:650694600&1&0 + 44560329:295420442&1&0 + 81490586:118219082&1&0 + 14468397:543502230&4&0 + 33796722:672149182&4&0 + 57004853:103522308&0&0 + 75964724:178394015&7&0 + 35995149:297934460&1&0 + 80427722:479482848&0&0 + 63772983:913544890&3&0 + 45325818:757845469&0&0 + 12442907:500345555&3&0 + 85058382:571466364&3&0 + 76113575:707318346&0&0 + 89778255:397729291&7&0 + 19606849:453493174&3&0 + 9661309:520108364&6&0 + 81181921:284731810&3&0 + 43503868:754674599&0&0 + 74176864:6819931&2&0 + 84904995:767743372&5&0 + 18822093:257648120&0&0 + 79284588:443146164&6&0 + 65095917:343472350&0&0 + 34310096:11542324&1&0 + 52754617:121414498&4&0 + 55523564:759342342&6&0 + 86808504:337494125&5&0 + 13747057:28246965&5&0 + 68652349:438144715&1&0 + 24951969:864685429&3&0 + 77396083:265361724&1&0 + 13678402:447525740&2&0 + 61438049:706145983&1&0 + 63396630:287945650&3&0 + 97771363:538876948&1&0 + 64482738:623825544&0&0 + 63689658:94830853&6&0 + 327089:61498742&0&0 + 97106732:300324714&2&0 + 52604035:297082894&1&0 + 97184810:598826627&7&0 + 43195723:554863413&2&0 + 89633329:116921545&2&0 + 50482922:317040887&4&0 + 84288135:229207595&0&0 + 22437586:91331933&0&0 + 72311045:525053734&2&0 + 75992672:750979372&0&0 + 72424811:659534862&0&0 + 18071841:977748881&3&0 + 23333846:575959031&7&0 + 47748671:588774578&0&0 + 47510292:774149565&4&0 + 11138744:261195763&1&0 + 81756244:76609808&0&0 + 4506699:932037770&3&0 + 80845792:337623149&1&0 + 94427100:345106252&1&0 + 24378512:643438192&1&0 + 59761096:747786818&4&0 + 95634901:59063091&2&0 + 12313712:542956627&2&0 + 28554934:5179969&4&0 + 87443245:251462366&3&0 + 65744787:251216899&0&0 + 53087924:447531474&3&0 + 
31966866:496175072&5&0 + 17011630:240212798&7&0 + 46996637:598107124&1&0 + 74723689:42235917&0&0 + 61342731:246236836&2&0 + 95203911:601016938&5&0 + 52774319:934997659&3&0 + 98376692:994240422&0&0 + 70734204:99428613&0&0 + 82292279:521629961&3&0 + 74789224:922087904&6&0 + 49154239:880099697&4&0 + 47695679:473571694&1&0 + 61342731:246236836&7&0 + 28481574:481670154&5&0 + 55910578:615174414&1&0 + 54383211:453535299&3&0 + 39811033:979896270&0&0 + 52604035:297082894&0&0 + 93979834:814182899&0&0 + 34172870:34623997&3&0 + 63931155:270819007&4&0 + 84904995:767743372&3&0 + 87690282:479248800&1&0 + 18737604:340823539&2&0 + 87846747:53527409&8&0 + 86451448:74852495&2&0 + 82631241:215531913&6&0 + 39811033:979896270&3&0 + 14318176:591914522&2&0 + 46996637:598107124&0&0 + 28497425:257098687&0&0 + 58163826:505032651&1&0 + 42099671:567889019&2&0 + 56429317:209917005&1&0 + 85733564:663876045&2&0 + 83022428:944078824&2&0 + 13446013:369194742&1&0 + 38358056:60833715&0&0 + 52754617:121414498&5&0 + 81974220:715744766&7&0 + 33407839:558615386&1&0 + 92388719:152303867&4&0 + 24383850:838434080&7&0 + 11714905:23536608&2&0 + 72077429:214721180&0&0 + 34172870:34623997&5&0 + 82631241:215531913&2&0 + 53307310:462486717&2&0 + 93146317:466673460&1&0 + 51381452:363401409&1&0 + 75152068:785226902&0&0 + 24378512:643438192&4&0 + 98124860:878043876&1&0 + 31289263:569563214&2&0 + 61472082:859670216&5&0 + 92716568:612076183&0&0 + 40977332:868266272&0&0 + 38586569:857193927&1&0 + 47748671:588774578&2&0 + 3605565:131483740&2&0 + 15819909:459944650&6&0 + 65362834:380490486&8&0 + 93146317:466673460&0&0 + 103672:805856166&4&0 + 30313579:821675567&2&0 + 96071626:992715291&0&0 + 81756244:76609808&1&0 + 30357532:115565152&2&0 + 90075179:164776514&5&0 + 91935567:565698614&3&0 + 24378512:643438192&0&0 + 3510889:744096151&5&0 + 2895362:274278227&2&0 + 67791119:50931034&0&0 + 56910010:652586984&2&0 + 20365333:405729388&1&0 + 32118984:867936417&5&0 + 43582325:124694496&6&0 + 53184255:926137075&2&0 + 
9661309:520108364&7&0 + 17711727:259518441&3&0 + 7256242:251275077&3&0 + 56931252:531726666&1&0 + 94812560:257774709&4&0 + 27051694:2092817&4&0 + 73443745:72138644&1&0 + 35034292:66740643&4&0 + 99842243:942096645&7&0 + 7857675:28742616&5&0 + 67651365:715596256&2&0 + 40443723:938389955&4&0 + 49235941:65252136&2&0 + 98961310:319784340&1&0 + 49154239:880099697&1&0 + 24363729:931822610&0&0 + 24383850:838434080&2&0 + 17095008:149761520&5&0 + 96502711:831176956&2&0 + 84965216:463802260&5&0 + 95470049:537113142&2&0 + 24831969:679139335&4&0 + 79312448:119747799&0&0 + 49691185:843161575&2&0 + 30520570:686353374&5&0 + 44560329:295420442&2&0 + 63271831:810442761&2&0 + 69000118:268012364&1&0 + 75076890:389362&2&0 + 31213691:499131596&3&0 + 90053119:548085600&1&0 + 94812560:257774709&1&0 + 99842243:942096645&3&0 + 70734204:99428613&2&0 + 91612396:751983860&4&0 + 61008441:281928562&3&0 + 19606849:453493174&0&0 + 44881433:220990270&6&0 + 98124860:878043876&3&0 + 24217312:185633128&3&0 + 1812760:222467758&2&0 + 90053119:548085600&2&0 + 50482922:317040887&6&0 + 26774964:953585124&0&0 + 27417896:531683550&7&0 + 58627766:871327242&5&0 + 86184757:353207740&6&0 + 89961816:386494093&0&0 + 63931155:270819007&2&0 + 35483519:599366092&1&0 + 76113575:707318346&2&0 + 37218570:381618017&3&0 + 40515987:99439735&7&0 + 89188064:400686934&5&0 + 24217312:185633128&4&0 + 61472082:859670216&1&0 + 67113011:779026478&0&0 + 5590704:242473685&1&0 + 89778255:397729291&8&0 + 58736725:737810966&2&0 + 97762062:510060263&1&0 + 92792097:813125155&0&0 + 72073917:875549213&5&0 + 53029602:150785718&0&0 + 62416945:197441685&4&0 + 9320061:177336824&2&0 + 44598616:444589273&0&0 + 1173145:804017165&0&0 + 91938100:198737700&4&0 + 18071841:977748881&4&0 + 77408000:80794786&0&0 + 88280032:964183211&8&0 + 67206790:870511274&5&0 + 19704814:290603674&1&0 + 87443245:251462366&8&0 + 14451740:824520616&2&0 + 38526768:604092377&1&0 + 43538946:567733508&1&0 + 7025462:66414340&0&0 + 95203911:601016938&3&0 + 
86184757:353207740&4&0 + 41272609:987790531&2&0 + 73871399:826196232&0&0 + 87709874:450837682&3&0 + 58121181:822669751&0&0 + 52515016:412430656&6&0 + 6986540:511897738&2&0 + 9000299:189622129&4&0 + 92539348:968187617&1&0 + 49454891:478453187&2&0 + 11584549:762656650&1&0 + 25918106:961366538&0&0 + 51615122:306136635&5&0 + 61460149:528663121&0&0 + 72828953:584713687&4&0 + 60013458:575282869&1&0 + 78661119:899233291&0&0 + 24363729:931822610&2&0 + 26306723:562780628&0&0 + 50482922:317040887&2&0 + 19645170:625184018&3&0 + 54051774:64602469&0&0 + 80722375:560147575&4&0 + 29676985:956475462&3&0 + 20516273:952298447&5&0 + 59322751:797251769&1&0 + 24172877:139409016&8&0 + 39212191:907959215&8&0 + 23719883:37149617&1&0 + 51622589:556434188&0&0 + 66630681:350186035&4&0 + 54383211:453535299&5&0 + 85733564:663876045&5&0 + 33882509:498489183&4&0 + 92909646:3930968&5&0 + 36789634:662824796&7&0 + 86451448:74852495&6&0 + 78134554:409055510&2&0 + 81181921:284731810&0&0 + 70408006:711179145&1&0 + 75009229:50085413&0&0 + 90034775:766779686&4&0 + 64015693:886848702&3&0 + 27051694:2092817&0&0 + 94276620:842637751&4&0 + 39212191:907959215&1&0 + 75992672:750979372&1&0 + 90075179:164776514&3&0 + 84965216:463802260&3&0 + 77396083:265361724&0&0 + 76735123:941678155&3&0 + 24098346:359515341&3&0 + 74881437:817126135&1&0 + 70289966:246764374&2&0 + 26040231:635673194&2&0 + 80845792:337623149&0&0 + 99794978:483568330&2&0 + 64532124:355047058&6&0 + 87709874:450837682&0&0 + 61132974:165535142&0&0 + 40977332:868266272&5&0 + 32118984:867936417&2&0 + 38586569:857193927&6&0 + 76113575:707318346&1&0 + 7857675:28742616&0&0 + 20516273:952298447&2&0 + 24172877:139409016&5&0 + 69855691:594661188&0&0 + 41272609:987790531&0&0 + 83452622:378063536&0&0 + 37728870:214734161&1&0 + 984049:483901572&1&0 + 59958151:354869725&4&0 + 43198723:303923997&1&0 + 34669023:392314594&2&0 + 56355057:984298232&2&0 + 26207377:652343612&2&0 + 45325818:757845469&4&0 + 794440:643333917&1&0 + 51622589:556434188&5&0 + 
51932258:733052883&2&0 + 6378094:493422886&4&0 + 91938100:198737700&5&0 + 44228044:620251323&4&0 + 11757315:383214052&6&0 + 14404891:512198282&5&0 + 22004771:538942275&1&0 + 48066323:255817938&2&0 + 63345993:162207736&0&0 + 57906673:134380185&4&0 + 41272609:987790531&3&0 + 85829140:808603613&0&0 + 75964724:178394015&0&0 + 56122919:43052528&7&0 + 61933829:8127087&6&0 + 51615122:306136635&8&0 + 58013677:80506859&7&0 + 10718440:862268826&2&0 + 41736786:508203586&2&0 + 34172870:34623997&0&0 + 8703246:828088125&1&0 + 36094705:71328450&7&0 + 71655861:942015124&0&0 + 57562731:820469772&5&0 + 59400525:37380364&2&0 + 98165980:787776889&0&0 + 10718440:862268826&3&0 + 80427722:479482848&1&0 + 60399439:867611170&0&0 + 36999708:558428301&5&0 + 63729697:265246888&3&0 + 72834250:466779877&3&0 + 22084553:161228022&5&0 + 45957916:710921358&3&0 + 12019596:768955185&4&0 + 24176974:995328674&7&0 + 8348663:925846920&2&0 + 52606765:416027911&8&0 + 10718440:862268826&6&0 + 17172949:95409758&3&0 + 2804255:949350001&7&0 + 55523564:759342342&7&0 + 84998:65122273&1&0 + 26207377:652343612&4&0 + 78272570:576029226&5&0 + 90027761:755365112&0&0 + 63739032:657829661&0&0 + 74812757:886509217&0&0 + 88267404:152780511&1&0 + 77396083:265361724&4&0 + 37112930:282552949&2&0 + 72236572:366846956&5&0 + 52606765:416027911&7&0 + 83211644:742859029&4&0 + 49938088:419589252&1&0 + 20573922:978527726&4&0 + 93758746:858902746&4&0 + 43827047:376424684&2&0 + 45325818:757845469&1&0 + 60200852:979593575&0&0 + 62467260:744386147&2&0 + 29911452:391223956&4&0 + 73267864:103518361&2&0 + 36789634:662824796&1&0 + 56210307:581998616&0&0 + 52515016:412430656&3&0 + 72063454:558851416&7&0 + 66272075:852533061&2&0 + 60080178:449210772&6&0 + 26774964:953585124&5&0 + 53987158:596068559&3&0 + 17711727:259518441&5&0 + 44953983:866115077&7&0 + 57399441:574346626&3&0 + 28196376:955082045&0&0 + 28196376:955082045&4&0 + 83230378:699930271&0&0 + 75964724:178394015&1&0 + 61008441:281928562&5&0 + 5824912:308260858&0&0 + 
90034775:766779686&1&0 + 3510889:744096151&2&0 + 75964724:178394015&4&0 + 34926578:601185182&0&0 + 78661119:899233291&4&0 + 13252039:525460275&0&0 + 81974220:715744766&4&0 + 12494935:932668955&4&0 + 50578643:849947573&3&0 + 63689658:94830853&4&0 + 49779369:62149871&0&0 + 36222496:482298410&6&0 + 39097823:700206507&0&0 + 90861661:377164183&4&0 + 54480971:66664018&0&0 + 40977332:868266272&1&0 + 43198934:335023352&1&0 + 39212191:907959215&2&0 + 73240167:974434349&0&0 + 44485622:208425683&4&0 + 49063958:681745900&0&0 + 11558628:887403567&5&0 + 39738078:45613849&4&0 + 62467260:744386147&1&0 + 7025462:66414340&7&0 + 7256242:251275077&4&0 + 20826439:301881016&1&0 + 74789224:922087904&3&0 + 69170173:300151437&2&0 + 56641137:834236527&2&0 + 40443723:938389955&0&0 + 603534:903878232&8&0 + 11007403:300063068&0&0 + 41736786:508203586&0&0 + 8429722:117818296&0&0 + 30839236:7551156&1&0 + 79621375:409080690&1&0 + 71198215:541233157&0&0 + 48218524:204265440&1&0 + 23719883:37149617&8&0 + 54383211:453535299&6&0 + 18134299:678983610&2&0 + 60064285:524769913&6&0 + 54127829:84354693&5&0 + 13969316:174981079&1&0 + 12498479:749511858&1&0 + 82321678:269000813&2&0 + 18932861:248967195&1&0 + 87953555:558848768&0&0 + 23719883:37149617&6&0 + 11736920:293374434&5&0 + 6716617:307347947&1&0 + 65095917:343472350&2&0 + 87479706:36726780&2&0 + 13940912:909214727&1&0 + 94276106:262453525&1&0 + 15600072:946000325&1&0 + 6740012:611323570&4&0 + 79126099:855166627&2&0 + 39212191:907959215&3&0 + 61627279:33833788&3&0 + 3510889:744096151&6&0 + 39600542:865492623&1&0 + 24383850:838434080&5&0 + 78272570:576029226&6&0 + 19645170:625184018&0&0 + 18737604:340823539&0&0 + 103672:805856166&3&0 + 49240476:669690989&0&0 + 14451740:824520616&3&0 + 6986540:511897738&5&0 + 6740012:611323570&3&0 + 1476651:489417849&1&0 + 48223600:703358029&0&0 + 90481479:287890425&7&0 + 40028054:319727191&0&0 + 88970662:25547912&1&0 + 39496198:945080868&0&0 + 79645721:394251089&2&0 + 81365760:76386875&2&0 + 14377435:246795498&6&0 + 
69030160:685229886&0&0 + 24832388:811203391&1&0 + 63931155:270819007&5&0 + 51712898:317666207&6&0 + 48223600:703358029&2&0 + 50578643:849947573&0&0 + 58627766:871327242&6&0 + 93985935:867478104&1&0 + 72828953:584713687&3&0 + 94812560:257774709&3&0 + 94917052:478894685&2&0 + 99842243:942096645&0&0 + 13693527:745147566&2&0 + 49228676:90120193&4&0 + 72044029:940353344&2&0 + 47996608:352927170&7&0 + 36999708:558428301&3&0 + 29876640:14470514&5&0 + 29486221:352891995&2&0 + 84896287:714382773&3&0 + 40063900:620687205&4&0 + 26214434:119220927&6&0 + 93887629:500769389&1&0 + 49604511:248139084&5&0 + 31339622:94005024&1&0 + 65744787:251216899&5&0 + 65617926:204033633&1&0 + 17172949:95409758&5&0 + 64015693:886848702&0&0 + 70858651:854922152&3&0 + 14468074:476007880&4&0 + 16140571:981617709&0&0 + 38828138:75574381&2&0 + 6716617:307347947&3&0 + 54288911:58962140&1&0 + 7248112:180850711&2&0 + 61234255:771364363&0&0 + 33796722:672149182&0&0 + 79621375:409080690&0&0 + 20365333:405729388&3&0 + 16439035:301098169&1&0 + 90536414:547747038&1&0 + 78086494:834446809&4&0 + 32118984:867936417&3&0 + 23333846:575959031&3&0 + 25928233:208833681&1&0 + 38682489:357772603&0&0 + 67473173:282206995&3&0 + 39212191:907959215&6&0 + 30797047:115487974&1&0 + 78086494:834446809&3&0 + 79077778:891358269&4&0 + 63295000:105176636&2&0 + 26774964:953585124&2&0 + 26214434:119220927&5&0 + 19706683:118758656&6&0 + 2895362:274278227&1&0 + 99833532:500564025&3&0 + 39395160:218721735&0&0 + 84965216:463802260&4&0 + 16439035:301098169&2&0 + 25332707:717566292&1&0 + 93887629:500769389&0&0 + 24098346:359515341&7&0 + 40515987:99439735&4&0 + 84647254:861531665&2&0 + 63772983:913544890&1&0 + 40063900:620687205&5&0 + 14468397:543502230&5&0 + 52392849:874508459&3&0 + 19606849:453493174&5&0 + 25918106:961366538&3&0 + 43195723:554863413&0&0 + 31289263:569563214&4&0 + 36580982:275911708&4&0 + 39097823:700206507&1&0 + 11849083:752814633&6&0 + 10187671:981855580&0&0 + 35432003:282835475&3&0 + 2613166:672964070&3&0 + 
49398852:890280265&5&0 + 40515987:99439735&5&0 + 47996608:352927170&0&0 + 11224354:610593814&3&0 + 64532124:355047058&3&0 + 11714905:23536608&0&0 + 70858651:854922152&5&0 + 33493258:513726334&1&0 + 61328978:899602754&5&0 + 80013650:102651723&8&0 + 24172877:139409016&3&0 + 81181921:284731810&1&0 + 94493667:576440991&4&0 + 9320061:177336824&1&0 + 66989301:823245775&4&0 + 72275014:131214035&5&0 + 36222496:482298410&3&0 + 13959204:581354194&0&0 + 10174974:120294830&0&0 + 93887629:500769389&4&0 + 39496198:945080868&3&0 + 60064285:524769913&1&0 + 37178590:132077611&0&0 + 46789695:572165705&2&0 + 70408006:711179145&7&0 + 4081641:44037476&2&0 + 63772983:913544890&0&0 + 54775618:458522638&0&0 + 73042265:446129524&4&0 + 99794978:483568330&1&0 + 15399094:948770580&1&0 + 52469998:590911777&5&0 + 98807218:361607752&6&0 + 89357225:96714883&6&0 + 80722375:560147575&0&0 + 91127077:413824363&1&0 + 19422185:559312438&2&0 + 51615122:306136635&7&0 + 30313579:821675567&0&0 + 96071626:992715291&2&0 + 88280032:964183211&3&0 + 49235941:65252136&4&0 + 43195723:554863413&8&0 + 94812560:257774709&0&0 + 45423397:984954387&6&0 + 17011630:240212798&6&0 + 35483519:599366092&3&0 + 41381137:711848921&2&0 + 56910010:652586984&0&0 + 35432003:282835475&0&0 + 4361610:538004056&5&0 + 80715881:581362471&7&0 + 45767543:834840580&3&0 + 86157015:284717318&5&0 + 55910578:615174414&2&0 + 74812757:886509217&5&0 + 3510889:744096151&4&0 + 97270881:91385809&8&0 + 65362834:380490486&0&0 + 6775183:346361619&3&0 + 30891037:703355240&1&0 + 47510292:774149565&3&0 + 51058391:273493813&2&0 + 72063454:558851416&3&0 + 7857675:28742616&3&0 + 12442907:500345555&1&0 + 50831868:929132209&4&0 + 30894037:75955676&3&0 + 60057868:952566551&1&0 + 2613166:672964070&4&0 + 58121181:822669751&1&0 + 42503311:427261511&1&0 + 74789224:922087904&4&0 + 86451448:74852495&1&0 + 80069366:584660319&1&0 + 81974220:715744766&0&0 + 98356898:511632245&3&0 + 81481199:950665955&2&0 + 77396083:265361724&3&0 + 39344453:2844854&5&0 + 
68983075:619758957&0&0 + 3605565:131483740&1&0 + 25773355:432246245&6&0 + 93146317:466673460&3&0 + 84647254:861531665&4&0 + 89188064:400686934&6&0 + 47996608:352927170&1&0 + 45767543:834840580&4&0 + 43538946:567733508&2&0 + 54714557:119789008&5&0 + 31185492:727375067&1&0 + 12779856:957438307&5&0 + 10174974:120294830&1&0 + 98356898:511632245&5&0 + 50482922:317040887&1&0 + 44090675:343089231&1&0 + 980829:856644254&4&0 + 984049:483901572&7&0 + 47335432:985984618&5&0 + 44090675:343089231&2&0 + 47771961:238328621&5&0 + 54552902:217188624&2&0 + 980829:856644254&0&0 + 74881437:817126135&0&0 + 86718175:345640919&0&0 + 14509050:708282746&3&0 + 22084553:161228022&3&0 + 26040231:635673194&3&0 + 84965216:463802260&0&0 + 61472082:859670216&0&0 + 47912996:669660571&4&0 + 31289263:569563214&0&0 + 7449595:999493580&2&0 + 89567267:335873132&0&0 + 2507552:38350344&0&0 + 43375304:628445689&1&0 + 7540821:697278098&1&0 + 13446013:369194742&0&0 + 4081641:44037476&3&0 + 37471105:762756421&0&0 + 24310608:493410247&0&0 + 47109679:5652390&2&0 + 79010859:477622599&1&0 + 35390877:599176194&1&0 + 43582325:124694496&3&0 + 1812760:222467758&8&0 + 61234255:771364363&1&0 + 56222762:962632857&3&0 + 54051774:64602469&1&0 + 85671473:899588745&1&0 + 86184757:353207740&3&0 + 67238651:554304974&4&0 + 49228676:90120193&6&0 + 28481574:481670154&3&0 + 82913494:615212745&3&0 + 73545484:102151889&8&0 + 92539348:968187617&2&0 + 98628510:519132078&1&0 + 62044501:825326152&2&0 + 8703246:828088125&5&0 + 16946277:290208549&1&0 + 8077408:486944454&3&0 + 26246486:906412836&2&0 + 93121454:535538144&7&0 + 72828953:584713687&0&0 + 11849083:752814633&7&0 + 56210307:581998616&2&0 + 94216343:320489309&0&0 + 70858651:854922152&4&0 + 394890:702464926&5&0 + 45362101:919291375&1&0 + 94493667:576440991&0&0 + 12779856:957438307&6&0 + 34310096:11542324&3&0 + 36198277:46533019&6&0 + 61460149:528663121&7&0 + 24832388:811203391&0&0 + 49240476:669690989&1&0 + 71655861:942015124&3&0 + 52392849:874508459&6&0 + 26801831:876221698&1&0 
+ 24176974:995328674&5&0 + 92539348:968187617&7&0 + 27051694:2092817&2&0 + 44881433:220990270&3&0 + 94628364:116475216&5&0 + 15819909:459944650&5&0 + 61512395:268246479&0&0 + 93887629:500769389&2&0 + 59025297:456814031&3&0 + 47528821:471052035&4&0 + 43375304:628445689&7&0 + 90096071:635671989&0&0 + 97380991:698920611&1&0 + 42503311:427261511&5&0 + 61472082:859670216&2&0 + 30797047:115487974&7&0 + 58163826:505032651&0&0 + 40010549:14555157&0&0 + 99815723:590977381&7&0 + 47771961:238328621&3&0 + 90326429:479392202&5&0 + 7990169:30125528&1&0 + 93985935:867478104&6&0 + 35432003:282835475&6&0 + 97762062:510060263&2&0 + 28682278:286262596&0&0 + 49297202:349828703&1&0 + 65362834:380490486&3&0 + 394890:702464926&4&0 + 34310096:11542324&0&0 + 56618482:86658284&5&0 + 56056086:479286098&0&0 + 327089:61498742&3&0 + 86907539:328276986&0&0 + 24951969:864685429&2&0 + 14468397:543502230&2&0 + 72424811:659534862&5&0 + 53380875:56990775&2&0 + 15819909:459944650&2&0 + 29911452:391223956&1&0 + 980829:856644254&3&0 + 34172870:34623997&6&0 + 69070168:28751529&2&0 + 99247:980149413&2&0 + 5618809:197855825&0&0 + 56618482:86658284&6&0 + 72834250:466779877&2&0 + 47695679:473571694&3&0 + 49938088:419589252&2&0 + 56618482:86658284&1&0 + 33407839:558615386&4&0 + 66945097:157708511&0&0 + 37218570:381618017&2&0 + 50831868:929132209&6&0 + 94427100:345106252&3&0 + 83452622:378063536&2&0 + 91851586:307300978&2&0 + 95200174:697000883&3&0 + 93830543:355644973&0&0 + 49148247:892059898&1&0 + 67113011:779026478&2&0 + 63739032:657829661&3&0 + 91593406:852651754&6&0 + 68652349:438144715&6&0 + 77966981:649378990&5&0 + 16899265:692141683&1&0 + 3532053:557574456&3&0 + 43198439:662808461&1&0 + 34651805:97557080&7&0 + 23061216:99434729&5&0 + 53302195:957822333&1&0 + 74789224:922087904&0&0 + 1173145:804017165&3&0 + 99794978:483568330&3&0 + 42923230:468988641&4&0 + 24097418:205489992&3&0 + 43658060:564497493&0&0 + 61512395:268246479&2&0 + 68343669:388708873&3&0 + 67206790:870511274&3&0 + 24310608:493410247&1&0 + 
99842243:942096645&1&0 + 9320061:177336824&5&0 + 19606849:453493174&1&0 + 89357225:96714883&7&0 + 24098346:359515341&0&0 + 28497425:257098687&2&0 + 63853711:880273408&4&0 + 43195723:554863413&1&0 + 62344657:852184446&2&0 + 12494935:932668955&0&0 + 20733614:921257806&1&0 + 64015693:886848702&6&0 + 91717739:892891474&7&0 + 81476270:156567648&3&0 + 74789224:922087904&8&0 + 65617926:204033633&4&0 + 53558840:151641187&0&0 + 67651365:715596256&5&0 + 97184810:598826627&5&0 + 56122919:43052528&1&0 + 47771961:238328621&2&0 + 44881433:220990270&1&0 + 65362834:380490486&1&0 + 59025297:456814031&1&0 + 52989796:367944781&2&0 + 9661309:520108364&3&0 + 22004771:538942275&4&0 + 92316363:939734067&0&0 + 24346272:848320174&4&0 + 81365760:76386875&0&0 + 58248124:777519906&0&0 + 47335432:985984618&4&0 + 83224862:719102167&6&0 + 26138914:698492330&8&0 + 45957916:710921358&0&0 + 76271183:785907918&4&0 + 28481574:481670154&6&0 + 90428494:316718138&1&0 + 95101780:576073953&0&0 + 36789634:662824796&0&0 + 73936828:572258359&5&0 + 60399439:867611170&2&0 + 12613730:540446356&0&0 + 90861661:377164183&2&0 + 49148247:892059898&0&0 + 40010549:14555157&2&0 + 99794978:483568330&7&0 + 15411925:53894564&1&0 + 62256323:763647343&3&0 + 55837993:14495058&8&0 + 92316363:939734067&4&0 + 65285717:244145031&2&0 + 26246486:906412836&1&0 + 82762712:894811689&2&0 + 68652349:438144715&0&0 + 82321822:968672172&0&0 + 23719883:37149617&0&0 + 39097823:700206507&3&0 + 81307853:323317653&2&0 + 87314087:334750260&5&0 + 84965216:463802260&1&0 + 23038701:513125022&1&0 + 72044029:940353344&1&0 + 89633329:116921545&0&0 + 80952875:582053915&3&0 + 89188064:400686934&1&0 + 80322143:575561122&1&0 + 44449704:961196104&5&0 + 52774319:934997659&1&0 + 83224862:719102167&1&0 + 86930254:8171039&8&0 + 95634901:59063091&3&0 + 87690282:479248800&5&0 + 14404891:512198282&4&0 + 68343669:388708873&1&0 + 28554934:5179969&2&0 + 81476270:156567648&0&0 + 80442241:270231675&3&0 + 27170244:552163830&3&0 + 53302195:957822333&2&0 + 
25332707:717566292&2&0 + 26246486:906412836&0&0 + 17172949:95409758&2&0 + 25010830:69829189&1&0 + 42923230:468988641&2&0 + 26801831:876221698&2&0 + 43010149:109870747&0&0 + 36586454:785680337&3&0 + 46789695:572165705&0&0 + 88080713:672772877&2&0 + 11584549:762656650&0&0 + 39212191:907959215&5&0 + 8077408:486944454&2&0 + 81307853:323317653&0&0 + 63689658:94830853&5&0 + 11849083:752814633&1&0 + 1812760:222467758&0&0 + 42984731:930599485&4&0 + 60080178:449210772&4&0 + 1812760:222467758&6&0 + 83230378:699930271&1&0 + 71013443:229044980&2&0 + 69030160:685229886&3&0 + 45325818:757845469&2&0 + 23061216:99434729&0&0 + 38388251:809020096&1&0 + 12498479:749511858&4&0 + 34669023:392314594&1&0 + 94489661:823348737&0&0 + 42503311:427261511&6&0 + 80952875:582053915&5&0 + 52469998:590911777&3&0 + 36198277:46533019&1&0 + 794440:643333917&2&0 + 43010149:109870747&5&0 + 32714634:291460589&2&0 + 51622589:556434188&2&0 + 98628510:519132078&0&0 + 70881824:471329692&0&0 + 47335432:985984618&0&0 + 70881824:471329692&1&0 + 23719883:37149617&2&0 + 56355057:984298232&3&0 + 35034292:66740643&3&0 + 49398852:890280265&6&0 + 59400525:37380364&1&0 + 94055752:200828895&1&0 + 91127077:413824363&0&0 + 84904995:767743372&0&0 + 78661119:899233291&1&0 + 84369792:875833206&0&0 + 25347634:720073187&4&0 + 58890455:598289383&1&0 + 89778255:397729291&5&0 + 57004853:103522308&1&0 + 91851586:307300978&5&0 + 24363729:931822610&3&0 + 19704814:290603674&5&0 + 84040284:507452327&1&0 + 47912996:669660571&3&0 + 46808018:336582676&0&0 + 96071626:992715291&6&0 + 23904648:376661712&2&0 + 80013650:102651723&3&0 + 68501344:633483542&0&0 + 41381137:711848921&7&0 + 69987878:478394706&2&0 + 50831868:929132209&0&0 + 87479706:36726780&5&0 + 9958572:479315549&3&0 + 72077429:214721180&1&0 + 47510292:774149565&5&0 + 44782292:24646573&0&0 + 84647254:861531665&0&0 + 12494935:932668955&1&0 + 43198439:662808461&6&0 + 30357532:115565152&4&0 + 56641137:834236527&1&0 + 2052336:295787981&3&0 + 19422185:559312438&1&0 + 
14859108:184675170&2&0 + 90481479:287890425&5&0 + 54045147:80242617&3&0 + 17307475:921652843&1&0 + 80722375:560147575&6&0 + 38964146:222356114&0&0 + 44228044:620251323&5&0 + 42209307:442034417&3&0 + 72828953:584713687&2&0 + 37178590:132077611&2&0 + 64532124:355047058&5&0 + 62053396:88696472&1&0 + 97509744:251772354&1&0 + 91612396:751983860&0&0 + 56122919:43052528&5&0 + 39395160:218721735&3&0 + 86157015:284717318&6&0 + 98666811:206440428&2&0 + 14859108:184675170&3&0 + 23061216:99434729&2&0 + 51058391:273493813&1&0 + 70881824:471329692&2&0 + 97762062:510060263&0&0 + 30797047:115487974&4&0 + 984049:483901572&6&0 + 96429072:241981909&2&0 + 60200852:979593575&6&0 + 7248112:180850711&0&0 + 54877703:634079928&0&0 + 42209307:442034417&0&0 + 32683211:278821282&6&0 + 22449547:21814022&3&0 + 73871399:826196232&2&0 + 29676985:956475462&5&0 + 47695679:473571694&0&0 + 84742010:130106102&2&0 + 63729697:265246888&5&0 + 4100953:608638082&4&0 + 91935567:565698614&0&0 + 44449704:961196104&2&0 + 61627279:33833788&2&0 + 44598616:444589273&3&0 + 29911452:391223956&3&0 + 10262184:160718981&4&0 + 73149984:677669331&4&0 + 54877703:634079928&1&0 + 55065825:57247164&2&0 + 69000118:268012364&4&0 + 87479706:36726780&6&0 + 48218524:204265440&3&0 + 50602025:559678799&3&0 + 40201271:222337229&4&0 + 63607488:414440767&0&0 + 93985935:867478104&7&0 + 85222108:614487320&4&0 + 94427100:345106252&2&0 + 20365333:405729388&2&0 + 25347634:720073187&2&0 + 92388719:152303867&3&0 + 54446911:376658051&0&0 + 98961310:319784340&2&0 + 18071841:977748881&0&0 + 45889263:861295166&0&0 + 65617926:204033633&8&0 + 5824912:308260858&4&0 + 58116832:472277749&3&0 + 61512395:268246479&1&0 + 62956693:863682188&0&0 + 98101480:644789386&4&0 + 67238651:554304974&2&0 + 32683211:278821282&0&0 + 53558840:151641187&1&0 + 97380991:698920611&4&0 + 67003817:938806399&4&0 + 73149984:677669331&0&0 + 17565416:593803371&0&0 + 54714557:119789008&0&0 + 43582325:124694496&7&0 + 61933829:8127087&0&0 + 60013458:575282869&5&0 + 
34839256:826322060&4&0 + 83211644:742859029&3&0 + 81307853:323317653&3&0 + 37709609:639963160&0&0 + 81476270:156567648&1&0 + 54383211:453535299&0&0 + 55029069:947629959&0&0 + 90141696:521979907&0&0 + 29059882:874721379&2&0 + 26214434:119220927&2&0 + 18876924:818582999&0&0 + 65744787:251216899&4&0 + 78330832:171496180&2&0 + 26774964:953585124&6&0 + 27051694:2092817&3&0 + 43195723:554863413&6&0 + 49938088:419589252&3&0 + 66630681:350186035&1&0 + 56910010:652586984&3&0 + 60013458:575282869&2&0 + 68343669:388708873&2&0 + 41381137:711848921&5&0 + 98269968:342498205&0&0 + 59958151:354869725&0&0 + 93758746:858902746&2&0 + 91938100:198737700&1&0 + 43198934:335023352&0&0 + 98356898:511632245&0&0 + 74176864:6819931&1&0 + 36789634:662824796&4&0 + 70408006:711179145&6&0 + 54714557:119789008&1&0 + 97270881:91385809&2&0 + 12498479:749511858&5&0 + 41381137:711848921&3&0 + 99842243:942096645&6&0 + 85995297:256528459&4&0 + 67473173:282206995&1&0 + 52774319:934997659&2&0 + 99833532:500564025&4&0 + 36094705:71328450&0&0 + 24346272:848320174&1&0 + 92033909:698192024&0&0 + 29876640:14470514&1&0 + 17172949:95409758&4&0 + 43582325:124694496&4&0 + 64038790:72175167&1&0 + 7025462:66414340&5&0 + 7990169:30125528&0&0 + 84647254:861531665&1&0 + 88117611:411183920&0&0 + 13693527:745147566&1&0 + 53324128:609320531&2&0 + 87709874:450837682&4&0 + 24097418:205489992&2&0 + 61512395:268246479&3&0 + 13956173:536937059&5&0 + 91612396:751983860&3&0 + 17011630:240212798&2&0 + 34839256:826322060&1&0 + 34051254:146783427&5&0 + 394890:702464926&3&0 + 8478903:846023875&2&0 + 52392849:874508459&2&0 + 13969316:174981079&3&0 + 44449704:961196104&4&0 + 63739032:657829661&5&0 + 67113011:779026478&1&0 + 87443245:251462366&2&0 + 62053396:88696472&3&0 + 52606765:416027911&4&0 + 49586042:371895777&0&0 + 49063958:681745900&2&0 + 38526768:604092377&0&0 + 95101780:576073953&2&0 + 34651805:97557080&0&0 + 54446911:376658051&6&0 + 98356898:511632245&6&0 + 15600072:946000325&5&0 + 28196376:955082045&2&0 + 
91851586:307300978&1&0 + 9000299:189622129&3&0 + 9546976:734325171&4&0 + 95181848:714820342&0&0 + 49938088:419589252&4&0 + 90075179:164776514&2&0 + 94276620:842637751&5&0 + 54045147:80242617&1&0 + 44449704:961196104&6&0 + 48066323:255817938&4&0 + 10489572:102857792&1&0 + 72044029:940353344&6&0 + 52604035:297082894&2&0 + 53808293:694574130&2&0 + 90027761:755365112&2&0 + 94493667:576440991&3&0 + 57095477:49273425&0&0 + 49297202:349828703&2&0 + 43198439:662808461&7&0 + 65095917:343472350&3&0 + 53987158:596068559&2&0 + 98165980:787776889&1&0 + 75964724:178394015&2&0 + 7256242:251275077&2&0 + 43582325:124694496&0&0 + 93830543:355644973&1&0 + 5979461:117921763&0&0 + 4081641:44037476&0&0 + 57886993:212586771&5&0 + 394890:702464926&0&0 + 59761096:747786818&8&0 + 28107596:496741880&0&0 + 87811192:243970863&0&0 + 25347634:720073187&3&0 + 18932861:248967195&4&0 + 10912081:299073308&6&0 + 49063958:681745900&1&0 + 88080713:672772877&5&0 + 6942420:268694617&2&0 + 28554934:5179969&0&0 + 36475639:150298750&3&0 + 30484610:339944899&5&0 + 92316363:939734067&2&0 + 88970662:25547912&2&0 + 39811033:979896270&4&0 + 96275276:134838908&0&0 + 61008441:281928562&2&0 + 80006895:539336678&3&0 + 35432003:282835475&5&0 + 28581534:230540665&2&0 + 86930254:8171039&1&0 + 17130005:271191290&2&0 + 44598616:444589273&1&0 + 32118984:867936417&6&0 + 83705003:798441379&0&0 + 45957916:710921358&7&0 + 13956173:536937059&0&0 + 25773355:432246245&3&0 + 78272570:576029226&1&0 + 69170173:300151437&0&0 + 31213691:499131596&0&0 + 98376692:994240422&2&0 + 60064285:524769913&4&0 + 95089663:763144708&1&0 + 11736920:293374434&0&0 + 12779856:957438307&7&0 + 22313523:720752569&1&0 + 4039261:950206373&0&0 + 28196376:955082045&6&0 + 91612396:751983860&1&0 + 30452630:493217909&4&0 + 65095917:343472350&5&0 + 47109679:5652390&1&0 + 71013443:229044980&4&0 + 5979461:117921763&3&0 + 67934305:221607310&1&0 + 66253237:965950612&5&0 + 71013443:229044980&5&0 + 50578643:849947573&1&0 + 32714634:291460589&0&0 + 
25010830:69829189&2&0 + 13940912:909214727&5&0 + 43596961:420778248&3&0 + 20128250:722747907&6&0 + 40010549:14555157&1&0 + 91935567:565698614&1&0 + 93830543:355644973&2&0 + 94628364:116475216&4&0 + 24990550:701579152&1&0 + 30797047:115487974&3&0 + 28481574:481670154&4&0 + 27417896:531683550&6&0 + 91593406:852651754&4&0 + 56524920:276851159&5&0 + 61460149:528663121&4&0 + 93121454:535538144&5&0 + 51932258:733052883&0&0 + 90053119:548085600&0&0 + 32683211:278821282&2&0 + 26207377:652343612&3&0 + 24951969:864685429&1&0 + 56090553:13068547&1&0 + 36531221:465544958&0&0 + 26214434:119220927&1&0 + 15411925:53894564&2&0 + 43827047:376424684&0&0 + 11138744:261195763&3&0 + 65617926:204033633&3&0 + 19706683:118758656&4&0 + 64532124:355047058&0&0 + 72236572:366846956&4&0 + 12442907:500345555&5&0 + 78291109:390943293&0&0 + 99833532:500564025&5&0 + 12494935:932668955&3&0 + 7025462:66414340&2&0 + 51615122:306136635&2&0 + 34051254:146783427&6&0 + 67934305:221607310&0&0 + 55837993:14495058&1&0 + 71440934:544568790&0&0 + 64738196:397211282&1&0 + 13848200:399773117&0&0 + 80722375:560147575&5&0 + 69109134:798600716&1&0 + 27417896:531683550&0&0 + 93887629:500769389&6&0 + 25918106:961366538&1&0 + 98961310:319784340&6&0 + 2804255:949350001&3&0 + 78661119:899233291&3&0 + 99794978:483568330&0&0 + 28497425:257098687&5&0 + 58013677:80506859&8&0 + 36222496:482298410&2&0 + 19613859:704336702&1&0 + 40028054:319727191&1&0 + 55523564:759342342&8&0 + 80715881:581362471&4&0 + 20728829:488225281&4&0 + 57562731:820469772&4&0 + 67003817:938806399&2&0 + 53442228:374108430&7&0 + 54288911:58962140&3&0 + 33513632:650694600&5&0 + 89778255:397729291&0&0 + 9661309:520108364&1&0 + 33493258:513726334&0&0 + 78744549:393356282&0&0 + 43596961:420778248&4&0 + 47136963:959239562&4&0 + 37218570:381618017&6&0 + 57399441:574346626&2&0 + 38526768:604092377&4&0 + 67523238:697508437&0&0 + 91464935:321828651&2&0 + 72311045:525053734&6&0 + 83224862:719102167&2&0 + 2052336:295787981&7&0 + 93121454:535538144&6&0 + 
56910010:652586984&1&0 + 20733614:921257806&2&0 + 24383850:838434080&3&0 + 52392849:874508459&5&0 + 36587580:228451807&1&0 + 26801831:876221698&0&0 + 54383211:453535299&2&0 + 3473124:197367451&3&0 + 74529648:589538823&1&0 + 30764731:669089127&2&0 + 59025297:456814031&6&0 + 82913494:615212745&0&0 + 77408000:80794786&2&0 + 39212191:907959215&7&0 + 55837993:14495058&6&0 + 31490945:628212703&1&0 + 30839236:7551156&0&0 + 72063454:558851416&5&0 + 43582325:124694496&2&0 + 56184893:691229767&2&0 + 11757315:383214052&5&0 + 63167734:665819705&2&0 + 53187669:801736771&2&0 + 53808293:694574130&0&0 + 3510889:744096151&3&0 + 87846747:53527409&3&0 + 58627766:871327242&1&0 + 17565416:593803371&4&0 + 77966981:649378990&4&0 + 49604511:248139084&1&0 + 79645721:394251089&4&0 + 24772216:857995420&0&0 + 24098346:359515341&4&0 + 22004771:538942275&3&0 + 16899265:692141683&0&0 + 38526768:604092377&8&0 + 17307475:921652843&0&0 + 63739032:657829661&2&0 + 19606849:453493174&7&0 + 93886887:222120256&1&0 + 32118984:867936417&4&0 + 87479706:36726780&1&0 + 12613730:540446356&1&0 + 89633329:116921545&8&0 + 26774964:953585124&1&0 + 63271831:810442761&3&0 + 46789695:572165705&5&0 + 18932861:248967195&0&0 + 36647735:47858640&1&0 + 2627273:910685012&0&0 + 14404891:512198282&0&0 + 62344657:852184446&3&0 + 53337305:527156895&7&0 + 85829140:808603613&6&0 + 50831868:929132209&1&0 + 27417896:531683550&4&0 + 47996608:352927170&2&0 + 33513632:650694600&6&0 + 17729199:152719649&1&0 + 17872801:743644570&1&0 + 41381137:711848921&6&0 + 49938088:419589252&5&0 + 14318176:591914522&3&0 + 51622589:556434188&3&0 + 87709874:450837682&2&0 + 36094705:71328450&5&0 + 28196376:955082045&1&0 + 83911234:123942496&0&0 + 56524920:276851159&2&0 + 49148247:892059898&7&0 + 63190338:539555088&5&0 + 9661309:520108364&2&0 + 20733614:921257806&5&0 + 26927277:639887490&1&0 + 42923230:468988641&3&0 + 603534:903878232&5&0 + 36222496:482298410&1&0 + 45670446:13274522&3&0 + 16140571:981617709&4&0 + 13959204:581354194&1&0 + 
26801831:876221698&3&0 + 90053119:548085600&4&0 + 53380875:56990775&1&0 + 72236572:366846956&3&0 + 36580982:275911708&6&0 + 98124860:878043876&0&0 + 29465223:259197434&4&0 + 30029539:567591478&1&0 + 84904995:767743372&4&0 + 52867992:64445255&1&0 + 34051254:146783427&1&0 + 47695679:473571694&5&0 + 96275276:134838908&1&0 + 77966981:649378990&2&0 + 56210307:581998616&1&0 + 45832521:483204127&0&0 + 88267404:152780511&4&0 + 67206790:870511274&0&0 + 13959204:581354194&2&0 + 47109679:5652390&8&0 + 88599044:900830211&0&0 + 63295000:105176636&5&0 + 76752891:299820811&3&0 + 47109679:5652390&3&0 + 14451740:824520616&1&0 + 7940265:732052199&4&0 + 44782292:24646573&1&0 + 95634901:59063091&7&0 + 21088777:580911428&2&0 + 64532124:355047058&2&0 + 22084553:161228022&1&0 + 40201271:222337229&3&0 + 49604511:248139084&2&0 + 98807218:361607752&8&0 + 15399094:948770580&0&0 + 90034775:766779686&3&0 + 58780166:97498602&4&0 + 79284588:443146164&4&0 + 52754617:121414498&7&0 + 20365333:405729388&6&0 + 93146317:466673460&2&0 + 80845792:337623149&3&0 + 71013443:229044980&6&0 + 36580982:275911708&5&0 + 74529648:589538823&0&0 + 20365333:405729388&4&0 + 72044029:940353344&5&0 + 52515016:412430656&2&0 + 10174974:120294830&5&0 + 11138744:261195763&0&0 + 30357532:115565152&6&0 + 73545882:550927034&4&0 + 39436817:570472461&3&0 + 58780166:97498602&0&0 + 43195723:554863413&5&0 + 79670318:631438367&7&0 + 4039261:950206373&1&0 + 85829140:808603613&4&0 + 86157015:284717318&8&0 + 24172877:139409016&1&0 + 11849083:752814633&0&0 + 50332413:946028543&2&0 + 74176864:6819931&5&0 + 56524920:276851159&1&0 + 72063454:558851416&4&0 + 11757315:383214052&4&0 + 90326429:479392202&6&0 + 33915602:115995259&0&0 + 78272570:576029226&3&0 + 54127829:84354693&4&0 + 45423397:984954387&4&0 + 73936828:572258359&4&0 + 96429072:241981909&3&0 + 55837993:14495058&3&0 + 6378094:493422886&6&0 + 78330832:171496180&3&0 + 67206790:870511274&2&0 + 11138744:261195763&2&0 + 74789224:922087904&5&0 + 43476633:597256408&0&0 + 
81181921:284731810&5&0 + 2052336:295787981&4&0 + 13747057:28246965&6&0 + 71013443:229044980&3&0 + 98628510:519132078&3&0 + 13678402:447525740&7&0 + 49063958:681745900&5&0 + 8371874:517008461&2&0 + 66989301:823245775&2&0 + 7504010:840190292&1&0 + 31966866:496175072&6&0 + 87479706:36726780&0&0 + 57886993:212586771&2&0 + 18603837:202398774&0&0 + 32683211:278821282&5&0 + 12125866:427070376&1&0 + 19706683:118758656&0&0 + 53087924:447531474&1&0 + 59761096:747786818&2&0 + 47912996:669660571&2&0 + 87314087:334750260&1&0 + 71655861:942015124&1&0 + 26306723:562780628&1&0 + 19706683:118758656&5&0 + 26129164:375963359&1&0 + 95200174:697000883&1&0 + 1173145:804017165&4&0 + 91851586:307300978&6&0 + 10198934:426173913&2&0 + 53184255:926137075&1&0 + 63167734:665819705&0&0 + 13679271:747757662&1&0 + 19645170:625184018&1&0 + 70408006:711179145&2&0 + 53187669:801736771&1&0 + 7598529:92723566&4&0 + 56184893:691229767&0&0 + 7449595:999493580&0&0 + 984049:483901572&3&0 + 72044029:940353344&7&0 + 86451448:74852495&0&0 + 11224354:610593814&1&0 + 87863917:656602628&0&0 + 3473124:197367451&7&0 + 45670446:13274522&4&0 + 30484610:339944899&2&0 + 10174974:120294830&4&0 + 73936828:572258359&3&0 + 12313712:542956627&3&0 + 16439035:301098169&5&0 + 85222108:614487320&0&0 + 39395160:218721735&1&0 + 42209307:442034417&1&0 + 19706683:118758656&1&0 + 39600542:865492623&0&0 + 72073917:875549213&2&0 + 53380875:56990775&0&0 + 11849083:752814633&5&0 + 23061216:99434729&4&0 + 42648500:359068147&0&0 + 37728870:214734161&0&0 + 39496198:945080868&4&0 + 57906673:134380185&3&0 + 90326429:479392202&2&0 + 30484610:339944899&8&0 + 24383850:838434080&4&0 + 33882509:498489183&5&0 + 51499587:698927989&4&0 + 31935567:622957495&0&0 + 98970477:148387964&2&0 + 4506699:932037770&0&0 + 36999708:558428301&2&0 + 78291109:390943293&3&0 + 24097418:205489992&4&0 + 93121454:535538144&4&0 + 82631241:215531913&3&0 + 36586454:785680337&1&0 + 61132974:165535142&3&0 + 24172877:139409016&2&0 + 62344657:852184446&6&0 + 
41381137:711848921&0&0 + 80322143:575561122&0&0 + 91604138:549050785&1&0 + 97184810:598826627&0&0 + 26774964:953585124&7&0 + 40977332:868266272&2&0 + 63295000:105176636&3&0 + 89695712:867781539&2&0 + 54446911:376658051&5&0 + 39268836:42129101&3&0 + 89778255:397729291&4&0 + 72044029:940353344&0&0 + 50482922:317040887&5&0 + 37703625:857959686&0&0 + 60080178:449210772&1&0 + 21012542:575718479&0&0 + 7857675:28742616&2&0 + 40063900:620687205&2&0 + 81320605:90473249&5&0 + 43198439:662808461&4&0 + 82321822:968672172&2&0 + 94276620:842637751&1&0 + 37142307:404379547&2&0 + 33407839:558615386&6&0 + 60080178:449210772&0&0 + 76752891:299820811&5&0 + 87953555:558848768&3&0 + 82762712:894811689&7&0 + 29465223:259197434&5&0 + 44485622:208425683&3&0 + 2804255:949350001&2&0 + 61342731:246236836&5&0 + 31966866:496175072&2&0 + 9661309:520108364&0&0 + 50831868:929132209&3&0 + 29876640:14470514&7&0 + 5979461:117921763&2&0 + 67238651:554304974&1&0 + 49228676:90120193&3&0 + 82631241:215531913&0&0 + 12019596:768955185&0&0 + 85222108:614487320&2&0 + 76752891:299820811&0&0 + 63772983:913544890&7&0 + 3532053:557574456&4&0 + 9546976:734325171&5&0 + 36580982:275911708&2&0 + 58116832:472277749&2&0 + 72236572:366846956&0&0 + 98807218:361607752&2&0 + 97792263:695076704&1&0 + 14451740:824520616&8&0 + 83950070:415419733&1&0 + 78134554:409055510&1&0 + 53184255:926137075&7&0 + 22084553:161228022&4&0 + 84834024:841200594&0&0 + 38828138:75574381&1&0 + 25630966:184579301&0&0 + 11757315:383214052&2&0 + 64738196:397211282&0&0 + 83022428:944078824&1&0 + 18603837:202398774&3&0 + 93985935:867478104&5&0 + 22004771:538942275&0&0 + 27491562:354199743&1&0 + 13969316:174981079&2&0 + 38385044:451635099&4&0 + 98807218:361607752&3&0 + 13011405:50988962&0&0 + 91851586:307300978&0&0 + 10489572:102857792&2&0 + 57562731:820469772&8&0 + 76735123:941678155&1&0 + 83211644:742859029&2&0 + 80722375:560147575&1&0 + 46808018:336582676&1&0 + 94628364:116475216&7&0 + 87846747:53527409&4&0 + 65617926:204033633&7&0 + 
9009964:237260131&0&0 + 24217312:185633128&0&0 + 42923230:468988641&1&0 + 93830543:355644973&3&0 + 45957916:710921358&6&0 + 62956693:863682188&2&0 + 90481479:287890425&0&0 + 70434522:372193649&1&0 + 74176864:6819931&6&0 + 91938100:198737700&7&0 + 59400525:37380364&3&0 + 57004853:103522308&2&0 + 69000118:268012364&3&0 + 18925324:658192454&1&0 + 16140571:981617709&2&0 + 35951923:278612299&0&0 + 89695712:867781539&6&0 + 83550795:756455161&0&0 + 38526768:604092377&6&0 + 53442228:374108430&5&0 + 80952875:582053915&1&0 + 77558669:132694436&1&0 + 38586569:857193927&3&0 + 65362834:380490486&4&0 + 14404891:512198282&2&0 + 95634901:59063091&4&0 + 77558669:132694436&0&0 + 71407149:552668478&1&0 + 83211644:742859029&0&0 + 64038790:72175167&0&0 + 53087924:447531474&2&0 + 91975281:465006911&1&0 + 36094705:71328450&6&0 + 79670318:631438367&4&0 + 69987878:478394706&0&0 + 52754617:121414498&0&0 + 49398852:890280265&0&0 + 54383211:453535299&4&0 + 61933829:8127087&3&0 + 98961310:319784340&4&0 + 40443723:938389955&3&0 + 60399439:867611170&6&0 + 92792097:813125155&2&0 + 56641137:834236527&0&0 + 40063900:620687205&1&0 + 26129164:375963359&4&0 + 33513632:650694600&3&0 + 98124860:878043876&8&0 + 5979461:117921763&1&0 + 87314087:334750260&0&0 + 48066323:255817938&3&0 + 97270881:91385809&3&0 + 27170244:552163830&2&0 + 33882509:498489183&3&0 + 39496198:945080868&6&0 + 87846747:53527409&7&0 + 31055089:223681656&1&0 + 55523564:759342342&3&0 + 2507552:38350344&3&0 + 95623061:999526611&2&0 + 77701927:738963158&2&0 + 17872801:743644570&0&0 + 63689658:94830853&0&0 + 9320061:177336824&3&0 + 39344453:2844854&4&0 + 82292279:521629961&5&0 + 7449595:999493580&4&0 + 53337305:527156895&2&0 + 67791119:50931034&2&0 + 76735123:941678155&6&0 + 34926578:601185182&1&0 + 10973372:252564849&2&0 + 7940265:732052199&1&0 + 30484610:339944899&1&0 + 40403702:309013849&8&0 + 81365760:76386875&4&0 + 51615122:306136635&4&0 + 52754617:121414498&1&0 + 41381137:711848921&8&0 + 61008441:281928562&4&0 + 
49398852:890280265&7&0 + 45423397:984954387&5&0 + 93985935:867478104&8&0 + 29059882:874721379&1&0 + 89831741:878651007&3&0 + 25347634:720073187&1&0 + 51712898:317666207&5&0 + 66355368:273926038&0&0 + 80715881:581362471&3&0 + 15359195:473941356&2&0 + 4361610:538004056&3&0 + 86157015:284717318&1&0 + 69070168:28751529&1&0 + 80699596:295049809&3&0 + 81756244:76609808&3&0 + 58627766:871327242&7&0 + 14318176:591914522&4&0 + 75974979:580480563&4&0 + 9546976:734325171&2&0 + 83705003:798441379&1&0 + 27178729:695470776&0&0 + 75009229:50085413&1&0 + 97509744:251772354&0&0 + 42099671:567889019&1&0 + 28581534:230540665&1&0 + 75974979:580480563&5&0 + 39344453:2844854&1&0 + 85829140:808603613&2&0 + 32683211:278821282&1&0 + 10718440:862268826&0&0 + 37991815:504720419&2&0 + 58248124:777519906&3&0 + 57562731:820469772&2&0 + 103672:805856166&5&0 + 55523564:759342342&2&0 + 103672:805856166&0&0 + 62256323:763647343&1&0 + 54127829:84354693&1&0 + 9000299:189622129&2&0 + 17711727:259518441&2&0 + 49297202:349828703&3&0 + 93886887:222120256&2&0 + 47695679:473571694&8&0 + 49228676:90120193&1&0 + 12313712:542956627&6&0 + 93758746:858902746&1&0 + 12537680:869375833&3&0 + 7598529:92723566&0&0 + 98539129:180493998&0&0 + 59025297:456814031&7&0 + 58627766:871327242&4&0 + 79670318:631438367&0&0 + 7025462:66414340&4&0 + 82762712:894811689&0&0 + 36198277:46533019&4&0 + 94415205:458274448&0&0 + 63295000:105176636&1&0 + 13747057:28246965&0&0 + 47136963:959239562&1&0 + 29059882:874721379&0&0 + 53307310:462486717&0&0 + 3605565:131483740&0&0 + 36653790:724513128&1&0 + 7598529:92723566&7&0 + 98101480:644789386&2&0 + 14451740:824520616&4&0 + 38385044:451635099&1&0 + 46789695:572165705&4&0 + 64482738:623825544&1&0 + 94276106:262453525&2&0 + 76683504:74173528&2&0 + 57562731:820469772&0&0 + 75656718:615770374&0&0 + 24362643:580293325&4&0 + 4100953:608638082&2&0 + 62416945:197441685&1&0 + 10262184:160718981&1&0 + 23061216:99434729&1&0 + 99842243:942096645&2&0 + 12498479:749511858&0&0 + 91464935:321828651&0&0 + 
73545882:550927034&0&0 + 23061216:99434729&3&0 + 30894037:75955676&1&0 + 76271183:785907918&1&0 + 65744787:251216899&3&0 + 77701927:738963158&0&0 + 11558628:887403567&2&0 + 31490945:628212703&5&0 + 66253237:965950612&2&0 + 30797047:115487974&0&0 + 80013650:102651723&1&0 + 25354103:499890095&4&0 + 37218570:381618017&4&0 + 35483519:599366092&0&0 + 6740012:611323570&2&0 + 82321678:269000813&0&0 + 49148247:892059898&6&0 + 93830543:355644973&4&0 + 54288911:58962140&0&0 + 46324975:493014007&2&0 + 81974220:715744766&5&0 + 17095008:149761520&1&0 + 84904995:767743372&1&0 + 61438049:706145983&3&0 + 3532053:557574456&0&0 + 11736920:293374434&2&0 + 59025297:456814031&4&0 + 8348663:925846920&1&0 + 39811033:979896270&1&0 + 73545484:102151889&4&0 + 90428494:316718138&0&0 + 82321678:269000813&1&0 + 76752891:299820811&4&0 + 78086494:834446809&2&0 + 36789634:662824796&6&0 + 7025462:66414340&6&0 + 67238651:554304974&3&0 + 54127829:84354693&3&0 + 95623061:999526611&1&0 + 86802339:723326550&0&0 + 83950070:415419733&6&0 + 55029069:947629959&1&0 + 65362834:380490486&5&0 + 5618809:197855825&7&0 + 76683504:74173528&1&0 + 17172949:95409758&6&0 + 61933829:8127087&4&0 + 36647735:47858640&3&0 + 3510889:744096151&7&0 + 603534:903878232&6&0 + 36475639:150298750&2&0 + 1812760:222467758&5&0 + 56090553:13068547&0&0 + 73267864:103518361&0&0 + 31966866:496175072&0&0 + 24831969:679139335&5&0 + 50482922:317040887&7&0 + 2052336:295787981&1&0 + 14468074:476007880&2&0 + 39097823:700206507&5&0 + 73871399:826196232&1&0 + 47528821:471052035&2&0 + 73570584:509185366&3&0 + 94628364:116475216&0&0 + 1662198:99105714&0&0 + 39436817:570472461&0&0 + 11714905:23536608&1&0 + 32118984:867936417&0&0 + 91464935:321828651&7&0 + 58013677:80506859&4&0 + 36094705:71328450&8&0 + 72423404:42352706&3&0 + 7540821:697278098&0&0 + 6740012:611323570&1&0 + 66272075:852533061&1&0 + 29423857:674311569&1&0 + 30452630:493217909&2&0 + 12537680:869375833&4&0 + 98124860:878043876&2&0 + 83452622:378063536&4&0 + 6986540:511897738&4&0 + 
72424811:659534862&3&0 + 16765477:372594907&4&0 + 93886887:222120256&3&0 + 26129164:375963359&2&0 + 28481574:481670154&2&0 + 69070168:28751529&0&0 + 68544258:743179312&2&0 + 20128250:722747907&8&0 + 70858651:854922152&2&0 + 23590940:870866638&0&0 + 17307475:921652843&4&0 + 74874855:576341906&0&0 + 4539010:724872795&0&0 + 12537680:869375833&0&0 + 97792263:695076704&2&0 + 59850985:154711861&2&0 + 73545484:102151889&6&0 + 25918106:961366538&4&0 + 53412846:338117810&6&0 + 28107596:496741880&1&0 + 29911452:391223956&2&0 + 43195723:554863413&7&0 + 47528821:471052035&3&0 + 9000299:189622129&0&0 + 73936828:572258359&0&0 + 88080713:672772877&1&0 + 52515016:412430656&5&0 + 9320061:177336824&0&0 + 66253237:965950612&1&0 + 1812760:222467758&4&0 + 90326429:479392202&4&0 + 34051254:146783427&2&0 + 44090675:343089231&0&0 + 49604511:248139084&3&0 + 24346272:848320174&0&0 + 39097823:700206507&2&0 + 17307475:921652843&5&0 + 7940265:732052199&0&0 + 12792478:599931585&0&0 + 80699596:295049809&2&0 + 30520570:686353374&3&0 + 34415669:157985628&2&0 + 67473173:282206995&5&0 + 66945097:157708511&1&0 + 84834024:841200594&3&0 + 60399439:867611170&1&0 + 47109679:5652390&5&0 + 22449547:21814022&4&0 + 39738078:45613849&0&0 + 43198439:662808461&0&0 + 70668519:507904197&1&0 + 22437586:91331933&3&0 + 61328978:899602754&3&0 + 91612396:751983860&6&0 + 86157015:284717318&3&0 + 84904995:767743372&6&0 + 85995297:256528459&1&0 + 36198277:46533019&0&0 + 94276620:842637751&0&0 + 81756244:76609808&6&0 + 10912081:299073308&2&0 + 54288911:58962140&2&0 + 80006895:539336678&4&0 + 86451448:74852495&4&0 + 63931155:270819007&0&0 + 58121181:822669751&2&0 + 81476270:156567648&5&0 + 26306723:562780628&2&0 + 26207377:652343612&1&0 + 57095477:49273425&1&0 + 87443245:251462366&1&0 + 85995297:256528459&3&0 + 44881433:220990270&0&0 + 52774319:934997659&0&0 + 90508784:978748912&7&0 + 13678402:447525740&6&0 + 62416945:197441685&2&0 + 50128363:284850957&8&0 + 17130005:271191290&1&0 + 26138914:698492330&1&0 + 
83950070:415419733&5&0 + 40443723:938389955&5&0 + 18925324:658192454&2&0 + 35995149:297934460&0&0 + 53442228:374108430&3&0 + 23333846:575959031&6&0 + 44485622:208425683&0&0 + 8429722:117818296&3&0 + 57906673:134380185&0&0 + 26138914:698492330&5&0 + 99986991:910170617&2&0 + 78291109:390943293&4&0 + 89357225:96714883&4&0 + 43111670:582733005&2&0 + 80255325:237774223&3&0 + 57906673:134380185&5&0 + 85733564:663876045&6&0 + 90096071:635671989&1&0 + 25347634:720073187&6&0 + 44953983:866115077&5&0 + 18822093:257648120&2&0 + 11736920:293374434&7&0 + 65362834:380490486&6&0 + 58736725:737810966&0&0 + 10718440:862268826&4&0 + 17011630:240212798&0&0 + 53412846:338117810&7&0 + 79010859:477622599&4&0 + 48066323:255817938&5&0 + 74176864:6819931&0&0 + 59400525:37380364&5&0 + 77408000:80794786&5&0 + 46324975:493014007&1&0 + 73149984:677669331&3&0 + 17872801:743644570&2&0 + 34415669:157985628&1&0 + 44782292:24646573&4&0 + 49240476:669690989&3&0 + 81307853:323317653&5&0 + 33796722:672149182&3&0 + 25285542:482312728&0&0 + 40515987:99439735&2&0 + 82631241:215531913&8&0 + 56910010:652586984&4&0 + 61460149:528663121&1&0 + 59761096:747786818&1&0 + 43198723:303923997&2&0 + 92305231:289072305&2&0 + 98124860:878043876&4&0 + 94216343:320489309&1&0 + 52469998:590911777&2&0 + 33407839:558615386&2&0 + 31490945:628212703&7&0 + 3532053:557574456&7&0 + 76711123:726781890&2&0 + 96071626:992715291&4&0 + 6716617:307347947&0&0 + 16436965:771028762&1&0 + 29876640:14470514&0&0 + 24378512:643438192&5&0 + 76735123:941678155&4&0 + 93887629:500769389&3&0 + 53412846:338117810&1&0 + 71772628:879293175&1&0 + 89188064:400686934&4&0 + 35121596:578268086&0&0 + 61678898:882739587&2&0 + 28554934:5179969&3&0 + 43195723:554863413&4&0 + 39436817:570472461&4&0 + 73149984:677669331&1&0 + 98666811:206440428&3&0 + 12019596:768955185&3&0 + 47771961:238328621&1&0 + 72109484:180208828&0&0 + 56367326:635551081&0&0 + 56355057:984298232&4&0 + 24832388:811203391&3&0 + 12779856:957438307&4&0 + 47335432:985984618&1&0 + 
72063454:558851416&2&0 + 11736920:293374434&4&0 + 80069366:584660319&2&0 + 99842243:942096645&4&0 + 21088777:580911428&0&0 + 78291109:390943293&6&0 + 63190338:539555088&7&0 + 73545882:550927034&3&0 + 65095917:343472350&1&0 + 60064285:524769913&0&0 + 90075179:164776514&4&0 + 62053396:88696472&0&0 + 18071841:977748881&2&0 + 94643809:143128836&1&0 + 82321678:269000813&6&0 + 47342112:257386546&1&0 + 91935567:565698614&2&0 + 50578643:849947573&2&0 + 81046958:810133979&3&0 + 85733564:663876045&8&0 + 80952875:582053915&2&0 + 30764731:669089127&1&0 + 14451740:824520616&0&0 + 72828953:584713687&5&0 + 72063454:558851416&8&0 + 4361610:538004056&4&0 + 44881433:220990270&7&0 + 83950070:415419733&7&0 + 72063454:558851416&0&0 + 80427722:479482848&2&0 + 81210421:313085869&4&0 + 44560329:295420442&4&0 + 60399439:867611170&5&0 + 33513632:650694600&2&0 + 20128250:722747907&1&0 + 99247:980149413&0&0 + 45832521:483204127&1&0 + 51148580:919061695&0&0 + 92539348:968187617&0&0 + 26138914:698492330&3&0 + 80255325:237774223&0&0 + 87479706:36726780&7&0 + 59304564:855907470&0&0 + 75840307:566220872&0&0 + 3320498:743283783&0&0 + 10262184:160718981&2&0 + 11757315:383214052&1&0 + 18932861:248967195&3&0 + 65617926:204033633&6&0 + 43375304:628445689&3&0 + 20728829:488225281&2&0 + 55523564:759342342&5&0 + 3605565:131483740&7&0 + 20128250:722747907&2&0 + 72828953:584713687&6&0 + 94493667:576440991&1&0 + 73545882:550927034&1&0 + 98628510:519132078&4&0 + 7857675:28742616&4&0 + 87846747:53527409&0&0 + 8429722:117818296&1&0 + 69030160:685229886&4&0 + 95470049:537113142&0&0 + 88758911:732300092&1&0 + 980829:856644254&1&0 + 80699596:295049809&0&0 + 28554934:5179969&1&0 + 80013650:102651723&7&0 + 45889263:861295166&1&0 + 9958572:479315549&1&0 + 82783201:654508086&0&0 + 73936828:572258359&2&0 + 65095917:343472350&8&0 + 46808018:336582676&2&0 + 46324975:493014007&3&0 + 7025462:66414340&1&0 + 63271831:810442761&7&0 + 22004771:538942275&5&0 + 86184757:353207740&2&0 + 10211847:108678674&3&0 + 
88554204:902099442&1&0 + 38385044:451635099&2&0 + 3473124:197367451&6&0 + 14468074:476007880&1&0 + 4002043:406702190&1&0 + 33915602:115995259&1&0 + 97184810:598826627&4&0 + 66630681:350186035&2&0 + 96502711:831176956&3&0 + 57095477:49273425&2&0 + 45957916:710921358&4&0 + 13417156:59130835&0&0 + 1173145:804017165&2&0 + 77966981:649378990&1&0 + 29465223:259197434&1&0 + 43658060:564497493&1&0 + 7598529:92723566&8&0 + 45325818:757845469&5&0 + 18932861:248967195&2&0 + 80442241:270231675&1&0 + 15819909:459944650&3&0 + 57562731:820469772&3&0 + 76752891:299820811&6&0 + 90508784:978748912&5&0 + 27491562:354199743&0&0 + 7850646:133651823&0&0 + 91593406:852651754&2&0 + 36580982:275911708&3&0 + 47771961:238328621&0&0 + 48218524:204265440&0&0 + 64015693:886848702&7&0 + 3635379:230692518&0&0 + 17011630:240212798&1&0 + 44953983:866115077&4&0 + 5979461:117921763&4&0 + 23333846:575959031&2&0 + 78291109:390943293&2&0 + 56868152:640567118&2&0 + 73042265:446129524&2&0 + 73871399:826196232&4&0 + 36222496:482298410&0&0 + 81320605:90473249&4&0 + 39738078:45613849&2&0 + 89503186:878988103&3&0 + 47510292:774149565&1&0 + 41711686:119124692&0&0 + 80442241:270231675&4&0 + 43375304:628445689&0&0 + 65285717:244145031&0&0 + 54045147:80242617&5&0 + 95041258:745948755&5&0 + 8371874:517008461&0&0 + 82913494:615212745&2&0 + 96502711:831176956&5&0 + 61512395:268246479&5&0 + 50128363:284850957&3&0 + 34651805:97557080&6&0 + 82762712:894811689&4&0 + 85058382:571466364&2&0 + 19704814:290603674&4&0 + 15399094:948770580&3&0 + 59025297:456814031&8&0 + 15819909:459944650&0&0 + 60200852:979593575&8&0 + 49240476:669690989&6&0 + 53987158:596068559&4&0 + 70991407:799821548&1&0 + 24098346:359515341&5&0 + 80715881:581362471&6&0 + 99247:980149413&1&0 + 98868935:587384485&2&0 + 73042265:446129524&3&0 + 84742010:130106102&0&0 + 20365333:405729388&5&0 + 46808018:336582676&5&0 + 97270881:91385809&4&0 + 44560329:295420442&3&0 + 89771981:701416033&0&0 + 10221086:708109010&4&0 + 37991815:504720419&1&0 + 
98269968:342498205&3&0 + 81490586:118219082&4&0 + 81320605:90473249&3&0 + 50602025:559678799&1&0 + 3605565:131483740&5&0 + 7809298:903959913&0&0 + 11736920:293374434&3&0 + 63729697:265246888&2&0 + 96695706:704933404&1&0 + 85995297:256528459&2&0 + 70408006:711179145&0&0 + 26774964:953585124&4&0 + 49154239:880099697&5&0 + 93830543:355644973&6&0 + 23333846:575959031&0&0 + 98269968:342498205&1&0 + 2507552:38350344&1&0 + 984049:483901572&2&0 + 51712898:317666207&7&0 + 65617926:204033633&2&0 + 43135756:992124594&2&0 + 82621112:629155746&0&0 + 13446013:369194742&6&0 + 69000118:268012364&6&0 + 34839256:826322060&3&0 + 70734204:99428613&3&0 + 89771981:701416033&2&0 + 76271183:785907918&2&0 + 83211644:742859029&1&0 + 89778255:397729291&3&0 + 31966866:496175072&7&0 + 49148247:892059898&3&0 + 56122919:43052528&3&0 + 38828138:75574381&4&0 + 9661309:520108364&5&0 + 61342731:246236836&3&0 + 52515016:412430656&7&0 + 55910578:615174414&3&0 + 94415205:458274448&4&0 + 92909646:3930968&0&0 + 33865818:786355636&0&0 + 19706683:118758656&2&0 + 93886887:222120256&4&0 + 10211847:108678674&2&0 + 12313712:542956627&1&0 + 51381452:363401409&0&0 + 69987878:478394706&1&0 + 76752891:299820811&2&0 + 98807218:361607752&7&0 + 28497425:257098687&4&0 + 51058391:273493813&3&0 + 30357532:115565152&0&0 + 95634901:59063091&5&0 + 58780166:97498602&2&0 + 72828953:584713687&1&0 + 15600072:946000325&0&0 + 94628364:116475216&3&0 + 25285542:482312728&1&0 + 59322751:797251769&0&0 + 31966866:496175072&3&0 + 7025462:66414340&3&0 + 39097823:700206507&7&0 + 69987878:478394706&3&0 + 31966866:496175072&1&0 + 65744787:251216899&6&0 + 8077408:486944454&0&0 + 44560329:295420442&0&0 + 2804255:949350001&0&0 + 87811192:243970863&3&0 + 79645721:394251089&1&0 + 91717739:892891474&1&0 + 33505131:269951519&0&0 + 18603837:202398774&5&0 + 37142307:404379547&0&0 + 65744787:251216899&2&0 + 47109679:5652390&6&0 + 14468397:543502230&0&0 + 36198277:46533019&2&0 + 26246486:906412836&7&0 + 21012542:575718479&1&0 + 
57399441:574346626&5&0 + 8570864:280829001&0&0 + 44881433:220990270&4&0 + 29876640:14470514&2&0 + 26801831:876221698&6&0 + 47342112:257386546&0&0 + 90521109:601133989&0&0 + 46789695:572165705&3&0 + 60080178:449210772&5&0 + 95200174:697000883&0&0 + 83705003:798441379&2&0 + 17711727:259518441&4&0 + 24176974:995328674&0&0 + 24310608:493410247&2&0 + 78330832:171496180&1&0 + 73936828:572258359&7&0 + 81974220:715744766&3&0 + 13693527:745147566&0&0 + 31339622:94005024&2&0 + 18932861:248967195&5&0 + 88267404:152780511&7&0 + 20728829:488225281&1&0 + 72109484:180208828&1&0 + 12537680:869375833&5&0 + 40443723:938389955&2&0 + 91593406:852651754&7&0 + 81756244:76609808&8&0 + 603534:903878232&1&0 + 44881433:220990270&8&0 + 80845792:337623149&2&0 + 72311045:525053734&4&0 + 36999708:558428301&1&0 + 87706904:194426816&1&0 + 29486221:352891995&0&0 + 28196376:955082045&3&0 + 86752384:691090977&1&0 + 70289966:246764374&0&0 + 57562731:820469772&1&0 + 42923230:468988641&0&0 + 73545484:102151889&7&0 + 70668519:507904197&3&0 + 49240476:669690989&5&0 + 7449595:999493580&5&0 + 51246491:77865810&0&0 + 24363729:931822610&1&0 + 15359195:473941356&0&0 + 90034775:766779686&2&0 + 49154239:880099697&3&0 + 57906673:134380185&1&0 + 14468074:476007880&6&0 + 3473124:197367451&8&0 + 50128363:284850957&6&0 + 24362643:580293325&2&0 + 18737604:340823539&4&0 + 89633329:116921545&7&0 + 39097823:700206507&4&0 + 81320605:90473249&2&0 + 90536414:547747038&2&0 + 71013443:229044980&0&0 + 25347634:720073187&5&0 + 82762712:894811689&8&0 + 29676985:956475462&4&0 + 11138744:261195763&7&0 + 83224862:719102167&5&0 + 72311045:525053734&0&0 + 49228676:90120193&2&0 + 43195723:554863413&3&0 + 11558628:887403567&0&0 + 3605565:131483740&6&0 + 37142307:404379547&1&0 + 3635379:230692518&1&0 + 96695706:704933404&3&0 + 5618809:197855825&6&0 + 75964724:178394015&6&0 + 85058382:571466364&0&0 + 39528031:852171322&2&0 + 52606765:416027911&5&0 + 69109134:798600716&0&0 + 13446013:369194742&4&0 + 98970477:148387964&3&0 + 
59400525:37380364&0&0 + 58890455:598289383&2&0 + 31935567:622957495&1&0 + 99247:980149413&3&0 + 61512395:268246479&4&0 + 47136963:959239562&7&0 + 91127077:413824363&2&0 + 67238651:554304974&0&0 + 79312448:119747799&1&0 + 62344657:852184446&0&0 + 90861661:377164183&3&0 + 8703246:828088125&6&0 + 22449547:21814022&5&0 + 13417156:59130835&2&0 + 36198277:46533019&5&0 + 4002043:406702190&3&0 + 12779856:957438307&2&0 + 47136963:959239562&0&0 + 12498479:749511858&3&0 + 73149984:677669331&2&0 + 75964724:178394015&5&0 + 60200852:979593575&7&0 + 15912561:356560921&3&0 + 27417896:531683550&5&0 + 36586454:785680337&0&0 + 47335432:985984618&6&0 + 36475639:150298750&0&0 + 81320605:90473249&0&0 + 19704814:290603674&0&0 + 70668519:507904197&2&0 + 51615122:306136635&0&0 + 94276620:842637751&3&0 + 88080713:672772877&0&0 + 94055752:200828895&0&0 + 97762062:510060263&6&0 + 39528031:852171322&1&0 + 91464935:321828651&6&0 + 72109484:180208828&2&0 + 86930254:8171039&4&0 + 43111670:582733005&3&0 + 4361610:538004056&6&0 + 45670446:13274522&2&0 + 76711123:726781890&0&0 + 72063454:558851416&1&0 + 91717739:892891474&4&0 + 98356898:511632245&1&0 + 85829140:808603613&5&0 + 31490945:628212703&4&0 + 98356898:511632245&2&0 + 73570584:509185366&5&0 + 51381452:363401409&3&0 + 59958151:354869725&2&0 + 78661119:899233291&2&0 + 81307853:323317653&6&0 + 26246486:906412836&3&0 + 7809298:903959913&2&0 + 95041258:745948755&2&0 + 60200852:979593575&1&0 + 33407839:558615386&3&0 + 83705003:798441379&4&0 + 85733564:663876045&3&0 + 36789634:662824796&5&0 + 83452622:378063536&3&0 + 3473124:197367451&0&0 + 50128363:284850957&2&0 + 30452630:493217909&6&0 + 31935567:622957495&2&0 + 72424811:659534862&1&0 + 30484610:339944899&3&0 + 43135756:992124594&0&0 + 30484610:339944899&0&0 + 54288911:58962140&6&0 + 97762062:510060263&4&0 + 93985935:867478104&4&0 + 14859108:184675170&1&0 + 7256242:251275077&1&0 + 8348663:925846920&3&0 + 8957654:762419159&1&0 + 93565100:601725646&1&0 + 52515016:412430656&0&0 + 
31185492:727375067&0&0 + 89357225:96714883&2&0 + 95041258:745948755&1&0 + 80069366:584660319&0&0 + 51615122:306136635&1&0 + 89503186:878988103&2&0 + 68267108:271765889&0&0 + 79126099:855166627&1&0 + 5438539:954887323&2&0 + 10198934:426173913&0&0 + 27170244:552163830&1&0 + 38246054:596394882&0&0 + 66253237:965950612&4&0 + 15411925:53894564&0&0 + 61342731:246236836&8&0 + 94489661:823348737&1&0 + 48223600:703358029&1&0 + 53337305:527156895&4&0 + 603534:903878232&7&0 + 77966981:649378990&6&0 + 91464935:321828651&1&0 + 36094705:71328450&1&0 + 43596961:420778248&1&0 + 36531221:465544958&3&0 + 75263372:318059986&0&0 + 86451448:74852495&3&0 + 30357532:115565152&5&0 + 39212191:907959215&4&0 + 15819909:459944650&4&0 + 50974342:597095063&1&0 + 25354103:499890095&2&0 + 10912081:299073308&7&0 + 74176864:6819931&3&0 + 55837993:14495058&0&0 + 16765477:372594907&1&0 + 79312448:119747799&2&0 + 78291109:390943293&1&0 + 97792263:695076704&0&0 + 20733614:921257806&4&0 + 92909646:3930968&1&0 + 99815723:590977381&2&0 + 41272609:987790531&1&0 + 10221086:708109010&3&0 + 24217312:185633128&1&0 + 61328978:899602754&6&0 + 80699596:295049809&1&0 + 51712898:317666207&4&0 + 25918106:961366538&6&0 + 90053119:548085600&3&0 + 67791119:50931034&4&0 + 36647735:47858640&4&0 + 98961310:319784340&3&0 + 75152068:785226902&1&0 + 80006895:539336678&0&0 + 15359195:473941356&3&0 + 70289966:246764374&1&0 + 45889263:861295166&2&0 + 49398852:890280265&3&0 + 7809298:903959913&3&0 + 60013458:575282869&4&0 + 12019596:768955185&2&0 + 98807218:361607752&0&0 + 99833532:500564025&6&0 + 8703246:828088125&0&0 + 10198934:426173913&1&0 + 12779856:957438307&0&0 + 49883953:73861939&0&0 + 12313712:542956627&7&0 + 12442907:500345555&0&0 + 71198215:541233157&1&0 + 35483519:599366092&2&0 + 92909646:3930968&3&0 + 50831868:929132209&2&0 + 63905820:224098428&1&0 + 12489037:588374360&2&0 + 22084553:161228022&0&0 + 27051694:2092817&5&0 + 33796722:672149182&1&0 + 52606765:416027911&6&0 + 86930254:8171039&3&0 + 82292279:521629961&6&0 
+ 32683211:278821282&3&0 + 86808504:337494125&2&0 + 15912561:356560921&4&0 + 87077807:356406314&1&0 + 34415669:157985628&3&0 + 77396083:265361724&2&0 + 63607488:414440767&1&0 + 54383211:453535299&1&0 + 87953555:558848768&1&0 + 59285880:603762424&0&0 + 23333846:575959031&5&0 + 70881824:471329692&3&0 + 7598529:92723566&1&0 + 47342112:257386546&2&0 + 81046958:810133979&1&0 + 34651805:97557080&3&0 + 74874855:576341906&1&0 + 20128250:722747907&4&0 + 98628510:519132078&2&0 + 47136963:959239562&8&0 + 43375304:628445689&5&0 + 23719883:37149617&7&0 + 41381137:711848921&1&0 + 67206790:870511274&1&0 + 87443245:251462366&5&0 + 89180997:205955188&0&0 + 67791119:50931034&1&0 + 31044545:3085923&1&0 + 3473124:197367451&4&0 + 16765477:372594907&3&0 + 53337305:527156895&0&0 + 36647735:47858640&2&0 + 50357:36778149&2&0 + 4506699:932037770&2&0 + 24346272:848320174&3&0 + 50128363:284850957&7&0 + 17711727:259518441&1&0 + 56355057:984298232&0&0 + 14509050:708282746&2&0 + 63271831:810442761&5&0 + 92305231:289072305&1&0 + 13693527:745147566&3&0 + 42503311:427261511&3&0 + 71772628:879293175&2&0 + 60013458:575282869&0&0 + 73183888:803569686&0&0 + 95017381:969896974&1&0 + 92909646:3930968&4&0 + 20728829:488225281&3&0 + 30797047:115487974&6&0 + 25918106:961366538&7&0 + 91938100:198737700&6&0 + 62044501:825326152&0&0 + 4002043:406702190&0&0 + 89695712:867781539&1&0 + 39268836:42129101&2&0 + 40403702:309013849&0&0 + 27377446:716188923&1&0 + 36587580:228451807&0&0 + 41711686:119124692&4&0 + 22066498:596848252&1&0 + 25272781:737704078&4&0 + 79010859:477622599&3&0 + 40977332:868266272&3&0 + 24176974:995328674&6&0 + 85405701:29449864&0&0 + 43198439:662808461&3&0 + 62053396:88696472&2&0 + 13747057:28246965&3&0 + 33513632:650694600&4&0 + 17095008:149761520&0&0 + 603534:903878232&2&0 + 25272781:737704078&0&0 + 87479706:36726780&8&0 + 87811192:243970863&2&0 + 76752891:299820811&1&0 + 88758911:732300092&4&0 + 24378512:643438192&3&0 + 24772216:857995420&1&0 + 98666811:206440428&1&0 + 
97106732:300324714&1&0 + 45957916:710921358&5&0 + 58248124:777519906&1&0 + 56222762:962632857&2&0 + 57886993:212586771&1&0 + 24098346:359515341&1&0 + 7598529:92723566&5&0 + 17011630:240212798&8&0 + 97184810:598826627&2&0 + 98961310:319784340&5&0 + 47757850:199707874&0&0 + 51712898:317666207&3&0 + 42503311:427261511&4&0 + 52515016:412430656&4&0 + 88280032:964183211&5&0 + 35432003:282835475&2&0 + 72275014:131214035&4&0 + 81210421:313085869&2&0 + 6775183:346361619&1&0 + 89831741:878651007&1&0 + 36999708:558428301&6&0 + 71198215:541233157&5&0 + 13417156:59130835&1&0 + 14468074:476007880&3&0 + 63853711:880273408&2&0 + 41523163:330921150&0&0 + 1812760:222467758&1&0 + 37112930:282552949&0&0 + 81046958:810133979&2&0 + 91851586:307300978&4&0 + 36985293:339387194&0&0 + 75964724:178394015&3&0 + 23719883:37149617&3&0 + 603534:903878232&0&0 + 25272781:737704078&2&0 + 3473124:197367451&5&0 + 83224862:719102167&0&0 + 78086494:834446809&6&0 + 95089663:763144708&0&0 + 82762712:894811689&3&0 + 18603837:202398774&4&0 + 79077778:891358269&2&0 + 41736786:508203586&1&0 + 29644936:620390517&0&0 + 44449704:961196104&1&0 + 17130005:271191290&0&0 + 60080178:449210772&2&0 + 50482922:317040887&8&0 + 31684459:285426099&0&0 + 42923230:468988641&6&0 + 48066323:255817938&0&0 + 20573922:978527726&5&0 + 28196376:955082045&5&0 + 96502711:831176956&7&0 + 13848200:399773117&1&0 + 8957654:762419159&0&0 + 72236572:366846956&1&0 + 81481199:950665955&1&0 + 81974220:715744766&8&0 + 64015693:886848702&1&0 + 86930254:8171039&7&0 + 81490586:118219082&0&0 + 4100953:608638082&1&0 + 33513632:650694600&7&0 + 79713776:956513829&0&0 + 87443245:251462366&6&0 + 25354103:499890095&0&0 + 82762712:894811689&1&0 + 94415205:458274448&3&0 + 13969316:174981079&4&0 + 88117611:411183920&1&0 + 27377446:716188923&2&0 + 73149984:677669331&5&0 + 327089:61498742&1&0 + 47335432:985984618&3&0 + 81476270:156567648&4&0 + 37218570:381618017&1&0 + 89188064:400686934&2&0 + 1662198:99105714&1&0 + 36094705:71328450&2&0 + 
82292279:521629961&2&0 + 28581534:230540665&3&0 + 26158676:93276038&0&0 + 28497425:257098687&3&0 + 69000118:268012364&8&0 + 34651805:97557080&5&0 + 14318176:591914522&0&0 + 29423857:674311569&0&0 + 17565416:593803371&2&0 + 61342731:246236836&4&0 + 2613166:672964070&0&0 + 97380991:698920611&0&0 + 92792097:813125155&1&0 + 6942420:268694617&1&0 + 63295000:105176636&4&0 + 89695712:867781539&4&0 + 67206790:870511274&4&0 + 6378094:493422886&2&0 + 27417896:531683550&1&0 + 18925324:658192454&3&0 + 97792263:695076704&5&0 + 49228676:90120193&0&0 + 92388719:152303867&2&0 + 82292279:521629961&4&0 + 12442907:500345555&2&0 + 53808293:694574130&3&0 + 52754617:121414498&2&0 + 43538946:567733508&3&0 + 17095008:149761520&2&0 + 45957916:710921358&8&0 + 72044029:940353344&4&0 + 7598529:92723566&2&0 + 49779369:62149871&2&0 + 20128250:722747907&7&0 + 54877703:634079928&5&0 + 14981801:6984071&4&0 + 27417896:531683550&2&0 + 23061216:99434729&6&0 + 70991407:799821548&0&0 + 53337305:527156895&6&0 + 38526768:604092377&2&0 + 98628510:519132078&7&0 + 88758911:732300092&2&0 + 6986540:511897738&1&0 + 87314087:334750260&3&0 + 90508784:978748912&0&0 + 44953983:866115077&3&0 + 54996919:77055753&1&0 + 68652349:438144715&4&0 + 36653790:724513128&2&0 + 38828138:75574381&0&0 + 95041258:745948755&0&0 + 10912081:299073308&3&0 + 19645170:625184018&2&0 + 97766795:513119457&0&0 + 20573922:978527726&3&0 + 34651805:97557080&8&0 + 24098346:359515341&2&0 + 76271183:785907918&0&0 + 19704814:290603674&6&0 + 95181848:714820342&1&0 + 99815723:590977381&5&0 + 95101780:576073953&3&0 + 26306723:562780628&3&0 + 48066323:255817938&6&0 + 94415205:458274448&2&0 + 13969316:174981079&0&0 + 92539348:968187617&5&0 + 39738078:45613849&1&0 + 12313712:542956627&0&0 + 95017381:969896974&2&0 + 84827871:935078132&0&0 + 89695712:867781539&5&0 + 63271831:810442761&1&0 + 86451448:74852495&5&0 + 45423397:984954387&3&0 + 30484610:339944899&7&0 + 32118984:867936417&1&0 + 79126099:855166627&5&0 + 2804255:949350001&5&0 + 
80715881:581362471&1&0 + 84896287:714382773&1&0 + 42503311:427261511&2&0 + 82126941:34276294&1&0 + 47136963:959239562&5&0 + 56056086:479286098&2&0 + 94643809:143128836&2&0 + 90508784:978748912&4&0 + 72073917:875549213&1&0 + 97184810:598826627&1&0 + 394890:702464926&8&0 + 94695079:929047434&1&0 + 24362643:580293325&0&0 + 94427100:345106252&7&0 + 67651365:715596256&6&0 + 15359195:473941356&4&0 + 39395160:218721735&2&0 + 95634901:59063091&6&0 + 60080178:449210772&7&0 + 34051254:146783427&0&0 + 60057868:952566551&0&0 + 78272570:576029226&2&0 + 50128363:284850957&0&0 + 26927277:639887490&0&0 + 13940912:909214727&4&0 + 86718175:345640919&2&0 + 30797047:115487974&8&0 + 66253237:965950612&3&0 + 47748671:588774578&1&0 + 98807218:361607752&1&0 + 6378094:493422886&0&0 + 18876924:818582999&2&0 + 22437586:91331933&2&0 + 14981801:6984071&1&0 + 33796722:672149182&2&0 + 87479706:36726780&4&0 + 90027761:755365112&1&0 + 15269918:261254233&0&0 + 12125866:427070376&0&0 + 98376692:994240422&3&0 + 52606765:416027911&2&0 + 13940912:909214727&7&0 + 45423397:984954387&2&0 + 72424811:659534862&6&0 + 56141369:829312773&1&0 + 95203911:601016938&0&0 + 14981801:6984071&3&0 + 77408000:80794786&1&0 + 38526768:604092377&5&0 + 93985935:867478104&2&0 + 54996919:77055753&0&0 + 31339622:94005024&0&0 + 57886993:212586771&3&0 + 89567267:335873132&3&0 + 15143259:558664371&6&0 + 57886993:212586771&4&0 + 47996608:352927170&4&0 + 81365760:76386875&3&0 + 23038701:513125022&6&0 + 14492160:265031588&0&0 + 79645721:394251089&0&0 + 56141369:829312773&2&0 + 37703625:857959686&2&0 + 96695706:704933404&2&0 + 5835305:685442396&2&0 + 12779856:957438307&3&0 + 23061216:99434729&8&0 + 73936828:572258359&6&0 + 28554934:5179969&5&0 + 14451740:824520616&7&0 + 72423404:42352706&2&0 + 14468074:476007880&5&0 + 94276620:842637751&6&0 + 5438539:954887323&0&0 + 73570584:509185366&0&0 + 74812757:886509217&4&0 + 14451740:824520616&5&0 + 3605565:131483740&4&0 + 52392849:874508459&0&0 + 2052336:295787981&0&0 + 1476651:489417849&0&0 
+ 1173145:804017165&5&0 + 58013677:80506859&6&0 + 47342112:257386546&4&0 + 40515987:99439735&6&0 + 88267404:152780511&6&0 + 36789634:662824796&2&0 + 10262184:160718981&3&0 + 55523564:759342342&0&0 + 24217312:185633128&2&0 + 86184757:353207740&1&0 + 24176974:995328674&2&0 + 90141696:521979907&3&0 + 16765477:372594907&2&0 + 14509050:708282746&0&0 + 5824912:308260858&2&0 + 33796722:672149182&5&0 + 54051774:64602469&4&0 + 15801671:171539023&1&0 + 40403702:309013849&2&0 + 84040284:507452327&0&0 + 84369792:875833206&2&0 + 15143259:558664371&3&0 + 95203911:601016938&6&0 + 3532053:557574456&5&0 + 37947746:717603805&0&0 + 5618809:197855825&1&0 + 37178590:132077611&3&0 + 44598616:444589273&2&0 + 39496198:945080868&1&0 + 91612396:751983860&5&0 + 51499587:698927989&3&0 + 21012542:575718479&3&0 + 26801831:876221698&5&0 + 90481479:287890425&2&0 + 327089:61498742&2&0 + 1812760:222467758&7&0 + 32118984:867936417&8&0 + 59850985:154711861&0&0 + 10718440:862268826&1&0 + 49240476:669690989&4&0 + 36531221:465544958&1&0 + 88280032:964183211&0&0 + 60200852:979593575&5&0 + 94493667:576440991&5&0 + 69170173:300151437&1&0 + 18925324:658192454&4&0 + 67113011:779026478&3&0 + 45767543:834840580&2&0 + 61008441:281928562&0&0 + 96071626:992715291&5&0 + 59322751:797251769&2&0 + 71655861:942015124&2&0 + 89831741:878651007&0&0 + 78291109:390943293&5&0 + 80845792:337623149&4&0 + 31684459:285426099&1&0 + 80013650:102651723&5&0 + 94276106:262453525&4&0 + 52979381:920137846&1&0 + 70668519:507904197&0&0 + 67238651:554304974&5&0 + 53302195:957822333&0&0 + 14318176:591914522&5&0 + 65282844:748512676&1&0 + 74723689:42235917&3&0 + 61132974:165535142&1&0 + 980829:856644254&5&0 + 90326429:479392202&3&0 + 97184810:598826627&6&0 + 39344453:2844854&2&0 + 53302195:957822333&4&0 + 96429072:241981909&4&0 + 91975281:465006911&0&0 + 19613859:704336702&2&0 + 26129164:375963359&6&0 + 53324128:609320531&5&0 + 62256323:763647343&0&0 + 13446013:369194742&2&0 + 95203911:601016938&4&0 + 63772983:913544890&4&0 + 
95101780:576073953&1&0 + 87314087:334750260&8&0 + 79077778:891358269&1&0 + 16439035:301098169&3&0 + 59761096:747786818&5&0 + 7809298:903959913&4&0 + 44953983:866115077&0&0 + 24362643:580293325&6&0 + 9546976:734325171&1&0 + 75840307:566220872&1&0 + 54714557:119789008&4&0 + 47695679:473571694&6&0 + 82321678:269000813&3&0 + 84369792:875833206&1&0 + 1173145:804017165&6&0 + 63271831:810442761&8&0 + 93830543:355644973&7&0 + 67523238:697508437&2&0 + 23719883:37149617&4&0 + 61328978:899602754&8&0 + 37991815:504720419&5&0 + 48066323:255817938&7&0 + 58248124:777519906&2&0 + 42984731:930599485&1&0 + 44228044:620251323&2&0 + 44881433:220990270&2&0 + 33882509:498489183&1&0 + 71772628:879293175&0&0 + 62344657:852184446&4&0 + 67206790:870511274&6&0 + 26129164:375963359&5&0 + 44228044:620251323&3&0 + 63607488:414440767&3&0 + 50866553:408454502&0&0 + 36094705:71328450&3&0 + 76271183:785907918&3&0 + 35483519:599366092&4&0 + 56184893:691229767&1&0 + 99986991:910170617&0&0 + 60013458:575282869&3&0 + 98356898:511632245&7&0 + 72834250:466779877&1&0 + 42099671:567889019&0&0 + 84998:65122273&2&0 + 75076890:389362&1&0 + 82586644:976358841&0&0 + 27417896:531683550&3&0 + 56122919:43052528&6&0 + 30357532:115565152&7&0 + 56524920:276851159&0&0 + 69855691:594661188&1&0 + 41711686:119124692&2&0 + 98101480:644789386&0&0 + 25510203:423901950&0&0 + 2804255:949350001&6&0 + 15600072:946000325&2&0 + 49938088:419589252&0&0 + 28497425:257098687&1&0 + 79126099:855166627&6&0 + 29876640:14470514&6&0 + 66630681:350186035&0&0 + 11558628:887403567&6&0 + 97270881:91385809&5&0 + 64015693:886848702&2&0 + 22066498:596848252&2&0 + 13747057:28246965&1&0 + 83224862:719102167&4&0 + 5590704:242473685&2&0 + 3532053:557574456&8&0 + 59958151:354869725&3&0 + 79284588:443146164&1&0 + 91938100:198737700&0&0 + 20573922:978527726&1&0 + 24990550:701579152&2&0 + 96071626:992715291&1&0 + 53187669:801736771&3&0 + 79284588:443146164&5&0 + 81974220:715744766&2&0 + 12313712:542956627&4&0 + 61933829:8127087&2&0 + 
25773355:432246245&0&0 + 56429317:209917005&3&0 + 62416945:197441685&5&0 + 87314087:334750260&6&0 + 54775618:458522638&1&0 + 69855691:594661188&3&0 + 51499587:698927989&1&0 + 76661806:9321473&1&0 + 91593406:852651754&5&0 + 75076890:389362&0&0 + 47136963:959239562&2&0 + 22004771:538942275&2&0 + 67238651:554304974&6&0 + 98269968:342498205&2&0 + 98868935:587384485&1&0 + 68544258:743179312&0&0 + 72275014:131214035&2&0 + 68544258:743179312&4&0 + 53324128:609320531&1&0 + 98628510:519132078&6&0 + 80274960:294383912&0&0 + 7857675:28742616&6&0 + 86718175:345640919&1&0 + 91851586:307300978&3&0 + 2052336:295787981&2&0 + 91717739:892891474&5&0 + 50482922:317040887&0&0 + 12442907:500345555&4&0 + 52867992:64445255&5&0 + 25010830:69829189&0&0 + 79284588:443146164&0&0 + 81481199:950665955&0&0 + 91593406:852651754&3&0 + 74789224:922087904&1&0 + 85733564:663876045&0&0 + 42503311:427261511&7&0 + 11558628:887403567&1&0 + 87690282:479248800&2&0 + 48447057:884746929&1&0 + 27178729:695470776&1&0 + 85829140:808603613&8&0 + 11757315:383214052&3&0 + 36222496:482298410&4&0 + 81756244:76609808&2&0 + 61627279:33833788&4&0 + 56122919:43052528&8&0 + 29486221:352891995&3&0 + 25347634:720073187&0&0 + 59761096:747786818&3&0 + 16140571:981617709&3&0 + 19606849:453493174&2&0 + 81490586:118219082&5&0 + 58890455:598289383&3&0 + 89357225:96714883&5&0 + 15600072:946000325&3&0 + 83452622:378063536&5&0 + 89357225:96714883&0&0 + 54288911:58962140&5&0 + 85733564:663876045&1&0 + 56222762:962632857&0&0 + 95623061:999526611&0&0 + 80442241:270231675&0&0 + 7809298:903959913&1&0 + 56524920:276851159&4&0 + 17711727:259518441&0&0 + 48218524:204265440&2&0 + 4361610:538004056&7&0 + 56868152:640567118&1&0 + 90861661:377164183&6&0 + 56618482:86658284&2&0 + 21088777:580911428&3&0 + 75152068:785226902&2&0 + 72424811:659534862&8&0 + 31490945:628212703&6&0 + 20573922:978527726&2&0 + 73443745:72138644&0&0 + 10174974:120294830&2&0 + 87811192:243970863&1&0 + 50128363:284850957&5&0 + 65617926:204033633&5&0 + 
56355057:984298232&1&0 + 23038701:513125022&8&0 + 94695079:929047434&0&0 + 98101480:644789386&1&0 + 77408000:80794786&4&0 + 49938088:419589252&6&0 + 67651365:715596256&0&0 + 24831969:679139335&3&0 + 23038701:513125022&2&0 + 8348663:925846920&4&0 + 12498479:749511858&7&0 + 61328978:899602754&4&0 + 22449547:21814022&2&0 + 19422185:559312438&0&0 + 40515987:99439735&3&0 diff --git a/servers/src/org/xtreemfs/sandbox/httperf/100files_urls b/servers/src/org/xtreemfs/sandbox/httperf/100files_urls new file mode 100644 index 0000000000000000000000000000000000000000..b28ab821d9850ef9d35724d4eaaf5030e0f053c4 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/httperf/100files_urls @@ -0,0 +1,428 @@ +96071626:992715291&6&0 + 80013650:102651723&0&0 + 56122919:43052528&3&0 + 86752384:691090977&0&0 + 52515016:412430656&5&0 + 11736920:293374434&8&0 + 82586644:976358841&1&0 + 30452630:493217909&2&0 + 10211847:108678674&4&0 + 2507552:38350344&3&0 + 96071626:992715291&5&0 + 11736920:293374434&7&0 + 87811192:243970863&2&0 + 49779369:62149871&2&0 + 30894037:75955676&2&0 + 27377446:716188923&2&0 + 984049:483901572&7&0 + 40515987:99439735&6&0 + 59285880:603762424&0&0 + 44953983:866115077&2&0 + 34051254:146783427&4&0 + 34051254:146783427&5&0 + 59285880:603762424&1&0 + 42923230:468988641&1&0 + 86752384:691090977&2&0 + 72044029:940353344&4&0 + 42923230:468988641&6&0 + 56122919:43052528&1&0 + 61678898:882739587&0&0 + 61678898:882739587&1&0 + 95101780:576073953&3&0 + 79645721:394251089&1&0 + 10211847:108678674&2&0 + 30452630:493217909&3&0 + 984049:483901572&6&0 + 70991407:799821548&1&0 + 14509050:708282746&0&0 + 984049:483901572&5&0 + 37218570:381618017&3&0 + 22004771:538942275&0&0 + 30452630:493217909&5&0 + 78661119:899233291&4&0 + 10211847:108678674&3&0 + 82586644:976358841&0&0 + 19706683:118758656&1&0 + 14451740:824520616&0&0 + 19706683:118758656&6&0 + 14451740:824520616&1&0 + 29059882:874721379&2&0 + 56910010:652586984&1&0 + 34415669:157985628&2&0 + 39528031:852171322&2&0 + 
56122919:43052528&7&0 + 96429072:241981909&2&0 + 12494935:932668955&5&0 + 44228044:620251323&0&0 + 56910010:652586984&5&0 + 68652349:438144715&6&0 + 2613166:672964070&4&0 + 19606849:453493174&0&0 + 61132974:165535142&2&0 + 56122919:43052528&8&0 + 95101780:576073953&0&0 + 34051254:146783427&1&0 + 36789634:662824796&0&0 + 89633329:116921545&2&0 + 96071626:992715291&1&0 + 18737604:340823539&3&0 + 34051254:146783427&7&0 + 19606849:453493174&4&0 + 81490586:118219082&4&0 + 68983075:619758957&0&0 + 44953983:866115077&3&0 + 89633329:116921545&7&0 + 14451740:824520616&3&0 + 28196376:955082045&1&0 + 44228044:620251323&4&0 + 38862054:672901032&0&0 + 57886993:212586771&4&0 + 56910010:652586984&3&0 + 18737604:340823539&2&0 + 89633329:116921545&6&0 + 40515987:99439735&8&0 + 59025297:456814031&5&0 + 27491562:354199743&2&0 + 42923230:468988641&0&0 + 40515987:99439735&4&0 + 95017381:969896974&1&0 + 37218570:381618017&2&0 + 28196376:955082045&6&0 + 3320498:743283783&0&0 + 794440:643333917&1&0 + 44953983:866115077&0&0 + 17011630:240212798&0&0 + 11736920:293374434&0&0 + 15600072:946000325&1&0 + 44953983:866115077&1&0 + 89633329:116921545&1&0 + 36475639:150298750&3&0 + 14451740:824520616&5&0 + 6740012:611323570&2&0 + 10973372:252564849&0&0 + 9009964:237260131&0&0 + 11736920:293374434&5&0 + 96071626:992715291&4&0 + 52515016:412430656&1&0 + 87811192:243970863&1&0 + 59025297:456814031&8&0 + 49779369:62149871&0&0 + 34415669:157985628&3&0 + 22004771:538942275&2&0 + 2507552:38350344&1&0 + 19606849:453493174&6&0 + 27377446:716188923&1&0 + 17565416:593803371&1&0 + 87811192:243970863&0&0 + 42923230:468988641&4&0 + 57886993:212586771&1&0 + 44228044:620251323&3&0 + 3635379:230692518&2&0 + 14509050:708282746&1&0 + 17565416:593803371&4&0 + 10973372:252564849&2&0 + 96695706:704933404&2&0 + 2507552:38350344&2&0 + 94276106:262453525&0&0 + 15600072:946000325&2&0 + 39496198:945080868&3&0 + 36475639:150298750&4&0 + 11736920:293374434&4&0 + 57886993:212586771&0&0 + 22004771:538942275&3&0 + 
96429072:241981909&4&0 + 37218570:381618017&6&0 + 68544258:743179312&4&0 + 80013650:102651723&8&0 + 53187669:801736771&1&0 + 34415669:157985628&1&0 + 52515016:412430656&7&0 + 19606849:453493174&3&0 + 56122919:43052528&4&0 + 70434522:372193649&0&0 + 30452630:493217909&1&0 + 98970477:148387964&0&0 + 13956173:536937059&5&0 + 81490586:118219082&0&0 + 29059882:874721379&1&0 + 78661119:899233291&3&0 + 22004771:538942275&5&0 + 44228044:620251323&1&0 + 79645721:394251089&4&0 + 36789634:662824796&1&0 + 96695706:704933404&1&0 + 89633329:116921545&0&0 + 91604138:549050785&0&0 + 99986991:910170617&1&0 + 95017381:969896974&2&0 + 34415669:157985628&4&0 + 40515987:99439735&5&0 + 27377446:716188923&0&0 + 80013650:102651723&5&0 + 81490586:118219082&6&0 + 79645721:394251089&0&0 + 3635379:230692518&3&0 + 90027761:755365112&0&0 + 34415669:157985628&0&0 + 30894037:75955676&1&0 + 95017381:969896974&0&0 + 56122919:43052528&2&0 + 44228044:620251323&5&0 + 39496198:945080868&2&0 + 17011630:240212798&1&0 + 68652349:438144715&0&0 + 19706683:118758656&2&0 + 95101780:576073953&4&0 + 79645721:394251089&5&0 + 65285717:244145031&1&0 + 19706683:118758656&5&0 + 53187669:801736771&3&0 + 94276106:262453525&3&0 + 39496198:945080868&5&0 + 30452630:493217909&6&0 + 81490586:118219082&3&0 + 10973372:252564849&1&0 + 794440:643333917&0&0 + 14451740:824520616&8&0 + 34051254:146783427&3&0 + 86752384:691090977&1&0 + 984049:483901572&2&0 + 89633329:116921545&5&0 + 43827047:376424684&4&0 + 44953983:866115077&4&0 + 2931488:604747700&0&0 + 36789634:662824796&4&0 + 40515987:99439735&1&0 + 72044029:940353344&6&0 + 72044029:940353344&0&0 + 98970477:148387964&1&0 + 96071626:992715291&3&0 + 13956173:536937059&0&0 + 42923230:468988641&5&0 + 61678898:882739587&2&0 + 81235131:446790486&0&0 + 72044029:940353344&1&0 + 79645721:394251089&3&0 + 96071626:992715291&2&0 + 81490586:118219082&1&0 + 22004771:538942275&4&0 + 26158676:93276038&0&0 + 56910010:652586984&2&0 + 14509050:708282746&2&0 + 984049:483901572&1&0 + 
40515987:99439735&0&0 + 70434522:372193649&1&0 + 96071626:992715291&0&0 + 14509050:708282746&3&0 + 61132974:165535142&1&0 + 95017381:969896974&3&0 + 2613166:672964070&0&0 + 56122919:43052528&0&0 + 15600072:946000325&8&0 + 96429072:241981909&0&0 + 52515016:412430656&2&0 + 87811192:243970863&3&0 + 28196376:955082045&4&0 + 34051254:146783427&0&0 + 89633329:116921545&8&0 + 53187669:801736771&2&0 + 90027761:755365112&2&0 + 37218570:381618017&0&0 + 59025297:456814031&7&0 + 52515016:412430656&6&0 + 45832521:483204127&0&0 + 72044029:940353344&2&0 + 72044029:940353344&7&0 + 96429072:241981909&3&0 + 80013650:102651723&2&0 + 81490586:118219082&5&0 + 68652349:438144715&4&0 + 57886993:212586771&5&0 + 95101780:576073953&1&0 + 28196376:955082045&3&0 + 68544258:743179312&0&0 + 44228044:620251323&2&0 + 68544258:743179312&2&0 + 13956173:536937059&1&0 + 80013650:102651723&4&0 + 96695706:704933404&0&0 + 59025297:456814031&6&0 + 30894037:75955676&4&0 + 78661119:899233291&0&0 + 17011630:240212798&6&0 + 80013650:102651723&3&0 + 10211847:108678674&0&0 + 65285717:244145031&2&0 + 49454891:478453187&0&0 + 14451740:824520616&7&0 + 56122919:43052528&5&0 + 44953983:866115077&6&0 + 99986991:910170617&0&0 + 94276106:262453525&1&0 + 79645721:394251089&2&0 + 91975281:465006911&0&0 + 56910010:652586984&4&0 + 28196376:955082045&2&0 + 10973372:252564849&3&0 + 19606849:453493174&1&0 + 34051254:146783427&2&0 + 37218570:381618017&7&0 + 79713776:956513829&0&0 + 11736920:293374434&1&0 + 44953983:866115077&5&0 + 70991407:799821548&0&0 + 10211847:108678674&1&0 + 22004771:538942275&1&0 + 59025297:456814031&2&0 + 984049:483901572&0&0 + 36789634:662824796&2&0 + 19606849:453493174&5&0 + 3635379:230692518&1&0 + 12494935:932668955&2&0 + 91975281:465006911&1&0 + 94276106:262453525&4&0 + 18737604:340823539&0&0 + 93979834:814182899&0&0 + 17011630:240212798&5&0 + 36475639:150298750&2&0 + 17565416:593803371&2&0 + 40515987:99439735&3&0 + 45832521:483204127&1&0 + 29059882:874721379&0&0 + 49454891:478453187&1&0 + 
94276106:262453525&2&0 + 78661119:899233291&1&0 + 59025297:456814031&1&0 + 15600072:946000325&6&0 + 65285717:244145031&0&0 + 37218570:381618017&5&0 + 42923230:468988641&3&0 + 15600072:946000325&5&0 + 6740012:611323570&1&0 + 2613166:672964070&2&0 + 36789634:662824796&3&0 + 59025297:456814031&4&0 + 14451740:824520616&4&0 + 68544258:743179312&3&0 + 984049:483901572&4&0 + 39496198:945080868&7&0 + 12494935:932668955&3&0 + 30452630:493217909&4&0 + 19606849:453493174&2&0 + 36475639:150298750&0&0 + 59025297:456814031&0&0 + 34051254:146783427&6&0 + 68652349:438144715&1&0 + 89633329:116921545&3&0 + 15600072:946000325&7&0 + 39496198:945080868&4&0 + 40515987:99439735&2&0 + 91975281:465006911&2&0 + 93979834:814182899&1&0 + 36789634:662824796&5&0 + 12494935:932668955&4&0 + 56122919:43052528&6&0 + 96429072:241981909&1&0 + 2931488:604747700&1&0 + 39528031:852171322&1&0 + 68652349:438144715&2&0 + 80013650:102651723&1&0 + 17565416:593803371&3&0 + 42923230:468988641&2&0 + 99986991:910170617&2&0 + 49454891:478453187&2&0 + 36789634:662824796&6&0 + 14451740:824520616&6&0 + 61132974:165535142&0&0 + 18737604:340823539&4&0 + 49454891:478453187&3&0 + 11736920:293374434&6&0 + 49883953:73861939&0&0 + 56910010:652586984&0&0 + 43827047:376424684&2&0 + 11736920:293374434&3&0 + 98970477:148387964&2&0 + 13956173:536937059&2&0 + 2613166:672964070&3&0 + 57886993:212586771&3&0 + 6740012:611323570&4&0 + 19606849:453493174&7&0 + 49779369:62149871&1&0 + 83550795:756455161&0&0 + 52515016:412430656&0&0 + 13252039:525460275&0&0 + 17011630:240212798&3&0 + 43827047:376424684&0&0 + 19706683:118758656&4&0 + 27491562:354199743&0&0 + 18737604:340823539&1&0 + 38682489:357772603&0&0 + 17011630:240212798&2&0 + 17011630:240212798&4&0 + 52515016:412430656&4&0 + 68544258:743179312&1&0 + 17011630:240212798&7&0 + 36475639:150298750&1&0 + 64738196:397211282&2&0 + 13956173:536937059&4&0 + 64738196:397211282&1&0 + 39496198:945080868&6&0 + 72044029:940353344&3&0 + 15600072:946000325&4&0 + 27377446:716188923&3&0 + 
12494935:932668955&1&0 + 39528031:852171322&0&0 + 40515987:99439735&7&0 + 78661119:899233291&2&0 + 17565416:593803371&0&0 + 98970477:148387964&3&0 + 19706683:118758656&0&0 + 89633329:116921545&4&0 + 68652349:438144715&3&0 + 80013650:102651723&7&0 + 984049:483901572&3&0 + 6740012:611323570&3&0 + 794440:643333917&2&0 + 15600072:946000325&0&0 + 80013650:102651723&6&0 + 28196376:955082045&0&0 + 13956173:536937059&3&0 + 90027761:755365112&1&0 + 59025297:456814031&3&0 + 28196376:955082045&7&0 + 91604138:549050785&1&0 + 61132974:165535142&3&0 + 44953983:866115077&7&0 + 56367326:635551081&0&0 + 27491562:354199743&1&0 + 52515016:412430656&3&0 + 95101780:576073953&2&0 + 52515016:412430656&8&0 + 68652349:438144715&5&0 + 64738196:397211282&0&0 + 11736920:293374434&2&0 + 2613166:672964070&1&0 + 14451740:824520616&2&0 + 17011630:240212798&8&0 + 57886993:212586771&2&0 + 39496198:945080868&1&0 + 43827047:376424684&1&0 + 43827047:376424684&3&0 + 28196376:955082045&5&0 + 37218570:381618017&4&0 + 30894037:75955676&3&0 + 30894037:75955676&0&0 + 30452630:493217909&0&0 + 39496198:945080868&0&0 + 6740012:611323570&0&0 + 2507552:38350344&0&0 + 19706683:118758656&3&0 + 36789634:662824796&7&0 + 37218570:381618017&1&0 + 3635379:230692518&0&0 + 96695706:704933404&3&0 + 72044029:940353344&5&0 + 81490586:118219082&2&0 + 53187669:801736771&0&0 + 15600072:946000325&3&0 + 12494935:932668955&0&0 diff --git a/servers/src/org/xtreemfs/sandbox/httperf/10files_urls b/servers/src/org/xtreemfs/sandbox/httperf/10files_urls new file mode 100644 index 0000000000000000000000000000000000000000..07dd6ba7f40a03ddf35b09440d7dd5a6995b8a9f --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/httperf/10files_urls @@ -0,0 +1,11 @@ +96071626:992715291&6&0 + 80013650:102651723&0&0 + 56122919:43052528&3&0 + 86752384:691090977&0&0 + 52515016:412430656&5&0 + 11736920:293374434&8&0 + 82586644:976358841&1&0 + 30452630:493217909&2&0 + 10211847:108678674&4&0 + 2507552:38350344&3&0 + diff --git 
a/servers/src/org/xtreemfs/sandbox/httperf/HttPerfRequestController.java b/servers/src/org/xtreemfs/sandbox/httperf/HttPerfRequestController.java new file mode 100644 index 0000000000000000000000000000000000000000..1b6c5e720904ed19e1016986ac7f0731f636a9e6 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/httperf/HttPerfRequestController.java @@ -0,0 +1,322 @@ +/* + * RequestController.java + * + * Created on June 20, 2007, 12:36 PM + * + * @author Bjoern Kolbeck (bjoern@xtreemfs.com) + * copyright 2006, 2007. + * + */ + +package org.xtreemfs.sandbox.httperf; + + +public class HttPerfRequestController { + + +///** +// * This class is the responsible of handling the request from clients. It acts +// * like a monitor : the different stages execute the methods in its threads when +// * methods are called +// */ +//public class HttPerfRequestController implements PinkyRequestListener, RequestHandler, UDPCom { +// +// // Stages of OSD +// private final AuthenticationStage stAuth; +// +// private final LeaseManagerStage stLease; +// +// private final StorageStage stStorage; +// +// private final ParserStage stParser; +// +// private final ReplicationStage stReplication; +// +// private final PipelinedPinky stPinky; // Stage with input requests +// +// private final MultiSpeedy stSpeedy; +// +// private final OSDConfig config; +// +// private final OSDId me; +// +// private Location loc; +// +// private RAID0 sp; +// +// private OSDOperation op; +// +// private Locations locs; +// +///* public RequestControllerForHttPerf() throws IOException { +// this(new OSDConfig("/"), new OSDId("localhost")); +// }*/ +// +// /** +// * Creates a new instance of RequestController +// * +// * @param config +// * OSD's setup +// * @param me +// * Identifier of this OSD +// */ +// public HttPerfRequestController(OSDConfig config, OSDId me) throws IOException { +// +// this.me = me; +// this.config = config; +// +// // create stages an register this controller as event listener +// 
stSpeedy = new MultiSpeedy(); +// OSDClient client = new OSDClient(stSpeedy); +// +// stParser = new ParserStage(this, config); +// stAuth = new AuthenticationStage(this, config); +// stLease = new LeaseManagerStage(this, this, config, me.toString(), me); +// stStorage = new StorageStage(this, this, config, stSpeedy); +// stReplication = new ReplicationStage(this, client, config); +// +// stPinky = new PipelinedPinky(config.getPort(), null, this); +// +// // debugStuff = new DebugStuff(this, stPinky, stAuth, stLease); +// +// generateOSDarrangementsForHttperf(config); +// } +// +// /** +// * It starts the execution of the OSD +// */ +// public void start() { +// stSpeedy.start(); +// stParser.start(); +// stAuth.start(); +// stLease.start(); +// stStorage.start(); +// stReplication.start(); +// stPinky.start(); +// +// try { +// stSpeedy.waitForStartup(); +// stParser.waitForStartup(); +// stAuth.waitForStartup(); +// stLease.waitForStartup(); +// stStorage.waitForStartup(); +// stReplication.waitForStartup(); +// stPinky.waitForStartup(); +// } catch (Exception exc) { +// Logging.logMessage(Logging.LEVEL_ERROR, this, exc); +// } +// } +// +// /** +// * It gets a request from Pinky and begins its processing. This is the entry +// * point for Pinky +// * +// * @param theRequest +// * The request to proccess. 
+// */ +// public void receiveRequest(PinkyRequest theRequest) { +// // Logging.logMessage(Logging.LEVEL_DEBUG,this,"Received request : " + +// // theRequest.toString()); +// +// OSDRequest rq = new OSDRequest(theRequest); +// rq.tRecv = System.currentTimeMillis(); +// +// generateOSDRequestFromHttperfRequest(theRequest, rq); +// +// stageCallback(rq); +// } +// +// /** +// * shuts down all stages and the HTTP server +// */ +// public void shutdown() { +// stPinky.shutdown(); +// stParser.shutdown(); +// stAuth.shutdown(); +// stLease.shutdown(); +// stStorage.shutdown(); +// stReplication.shutdown(); +// stSpeedy.shutdown(); +// +// try { +// stPinky.waitForShutdown(); +// stParser.waitForShutdown(); +// stAuth.waitForShutdown(); +// stLease.waitForShutdown(); +// stStorage.waitForShutdown(); +// stReplication.waitForShutdown(); +// stSpeedy.waitForStartup(); +// } catch (Exception exc) { +// Logging.logMessage(Logging.LEVEL_ERROR, this, exc); +// } +// } +// +// public OSDId getMe() { +// return me; +// } +// +// public void stageCallback(OSDRequest request) { +// +// request.tParse = System.currentTimeMillis(); +// +// switch (request.getStatus()) { +// +// case AUTHENTICATED: +// +// // check where the request has to be enqueued, depending on the +// // requested operation +// switch (request.getOSDOperation().getOpType()) { +// +// case READ: +// // open operations and fully-synchronous read operations are +// // directly enqueued at the storage stage, while any other +// // read requests first have to be enqueued at the lease stage +///* if (request.getOSDOperation().getOpSubType() == OperationSubType.OPEN +// || (request.getLocations().getRepUpdatePolicy().equals( +// Locations.RUP_SYNC) && request.getLocations() +// .getRepUpdatePolicySyncLevel() == request +// .getLocations().size()))*/ +// stStorage.enqueueRequest(request); +///* else +// stLease.enqueueRequest(request);*/ +// break; +// +// default: +// throw new RuntimeException("illegal request status: 
" +// + request.getStatus()); +// } +// break; +// +// case PERSISTED: +///* if (request.getLocations() != null +// && request.getLocations().size() > 1 +// && ((request.getOSDOperation().getOpType() == OperationType.WRITE && request +// .getLocations().getRepUpdatePolicy().equals( +// Locations.RUP_SYNC)) +// || request.getOSDOperation().getOpType() == OperationType.DELETE || (request +// .getOSDOperation().getOpType() == OperationType.RPC && request +// .getOSDOperation().getOpSubType() == OperationSubType.TRUNCATE))) { +// stReplication.enqueueRequest(request); +// } else {*/ +// response(request); +//// } +// break; +// +// case NOTFOUND: +// case FAILED: +// response(request); +// break; +// +// default: +// throw new RuntimeException("illegal request status: " +// + request.getStatus()); +// } +// } +// +// /** +// * Sends back the client response. +// * +// * @param rq +// * the request +// */ +// private void response(OSDRequest rq) { +// PinkyRequest answer = rq.getRequest(); +// +// // Logging.logMessage(Logging.LEVEL_INFO,this,"stage duration +// // (auth/parse/lease/repl/store): "+ +// // (rq.tAuth-rq.tRecv)+"/"+ +// // (rq.tParse-rq.tRecv)+"/"+ +// // (rq.tLease-rq.tRecv)+"/"+ +// // (rq.tRepl-rq.tRecv)+"/"+ +// // (rq.tStore-rq.tRecv)); +// +// switch (rq.getStatus()) { +// +// case NOTFOUND: +// answer.setResponse(HTTPUtils.SC_NOT_FOUND, rq.getErrorMsg()); +// break; +// +// case PERSISTED: +// if (rq.getData() != null) { +// answer.setResponse(HTTPUtils.SC_OKAY, rq.getData(), rq +// .getDataType()); +// } else { +// String newFileSize = rq.getNewFileSize(); +// if (newFileSize != null) { +// HTTPHeaders newFileSizeHeader = new HTTPHeaders(); +// newFileSizeHeader.addHeader(HTTPHeaders.HDR_XNEWFILESIZE, +// newFileSize.toString()); +// +// answer.setResponse(HTTPUtils.SC_OKAY, null, +// HTTPUtils.DATA_TYPE.JSON, newFileSizeHeader); +// } else { +// answer.setResponse(HTTPUtils.SC_OKAY); +// } +// } +// break; +// +// case FAILED: +// 
answer.setResponse(HTTPUtils.SC_BAD_REQUEST, rq.getErrorMsg()); +// break; +// +// default: +// // This is a bug because the request isn't any of allowed ones +// throw new RuntimeException( +// "The status of the request was not a final one"); +// } +// +// Logging.logMessage(Logging.LEVEL_DEBUG, this, "Processed request : " +// + answer.toString()); +// stPinky.sendResponse(answer); +// } +// +// /** +// * It gets the storageStage of the controller +// * +// * @return It returns the storage stage used by the controller +// */ +// public StorageStage getStorage() { +// return stStorage; +// } +// +// private void generateOSDarrangementsForHttperf(OSDConfig config) { +// op = new OSDOperation(OperationType.READ, +// OperationSubType.WHOLE); +// List osd = new ArrayList(); +// osd.add(new OSDId("localhost", config.getPort(), OSDId.SCHEME_HTTP)); +// sp = new RAID0(1,1); +// loc = new Location(sp, osd); +// ArrayList list = new ArrayList(2); +// list.add(loc); +// locs = new Locations(list); +// } +// +// private void generateOSDRequestFromHttperfRequest(PinkyRequest theRequest, OSDRequest rq) { +// String[] uri = theRequest.requestURI.split("&"); +// String fileId = uri[0]; +// int objNo = Integer.parseInt(uri[1]); +// int version = Integer.parseInt(uri[2]); +// +// rq.setFileId(fileId); +// rq.setObjectNo(objNo); +// rq.setVersionNo(version); +// +// rq.setOSDOperation(op); +// rq.setPolicy(sp); +// rq.setLocation(loc); +// rq.setLocations(locs); +// rq.setCapability(new Capability(rq.getFileId(), "read", "IAmTheClient", 0)); +// rq.setStatus(OSDRequest.Status.AUTHENTICATED); +// } +// +// public void sendUDP(ReusableBuffer data, InetSocketAddress receiver) { +// throw new UnsupportedOperationException("Not supported yet."); +// } +// +// public void receiveUDP(ReusableBuffer data, InetSocketAddress sender) { +// throw new UnsupportedOperationException("Not supported yet."); +// } +// +// public void sendInternalEvent(OSDRequest event) { +// } +} diff --git 
a/servers/src/org/xtreemfs/sandbox/httperf/HttPerfRequestControllerMultithreaded.java b/servers/src/org/xtreemfs/sandbox/httperf/HttPerfRequestControllerMultithreaded.java new file mode 100644 index 0000000000000000000000000000000000000000..152c72e2ea294ab6f7938404240071e83ac3091c --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/httperf/HttPerfRequestControllerMultithreaded.java @@ -0,0 +1,326 @@ +/* + * HttPerfRequestControllerMultithreaded.java + * + * Created on December 22, 2007, 15:28 PM + * + * @author Eugenio Cesario + * + * + */ + +package org.xtreemfs.sandbox.httperf; + + +public class HttPerfRequestControllerMultithreaded { + +} + +///** +// * This class is the responsible of handling the request from clients. It acts +// * like a monitor : the different stages execute the methods in its threads when +// * methods are called +// */ +//public class HttPerfRequestControllerMultithreaded implements PinkyRequestListener, RequestHandler, UDPCom { +// +// // Stages of OSD +// private final AuthenticationStage stAuth; +// +// private final LeaseManagerStage stLease; +// +// //private final StorageStage stStorage; +// private final MultithreadedStorageStage stStorage; +// +// private final ParserStage stParser; +// +// private final ReplicationStage stReplication; +// +// private final PipelinedPinky stPinky; // Stage with input requests +// +// private final MultiSpeedy stSpeedy; +// +// private final OSDConfig config; +// +// private final OSDId me; +// +// private Location loc; +// +// private RAID0 sp; +// +// private OSDOperation op; +// +// private Locations locs; +// +///* public RequestControllerForHttPerf() throws IOException { +// this(new OSDConfig("/"), new OSDId("localhost")); +// }*/ +// +// /** +// * Creates a new instance of RequestController +// * +// * @param config +// * OSD's setup +// * @param me +// * Identifier of this OSD +// */ +// public HttPerfRequestControllerMultithreaded(OSDConfig config, OSDId me) throws IOException { +// +// 
this.me = me; +// this.config = config; +// +// // create stages an register this controller as event listener +// stSpeedy = new MultiSpeedy(); +// OSDClient client = new OSDClient(stSpeedy); +// +// stParser = new ParserStage(this, config); +// stAuth = new AuthenticationStage(this, config); +// stLease = new LeaseManagerStage(this, this, config, me.toString(), me); +// //stStorage = new StorageStage(this, this, config, stSpeedy); +// stStorage = new MultithreadedStorageStage(this, this, config, stSpeedy,10); +// stReplication = new ReplicationStage(this, client, config); +// +// stPinky = new PipelinedPinky(config.getPort(), null, this); +// +// // debugStuff = new DebugStuff(this, stPinky, stAuth, stLease); +// +// generateOSDarrangementsForHttperf(config); +// } +// +// /** +// * It starts the execution of the OSD +// */ +// public void start() { +// stSpeedy.start(); +// stParser.start(); +// stAuth.start(); +// stLease.start(); +// stStorage.start(); +// stReplication.start(); +// stPinky.start(); +// +// try { +// stSpeedy.waitForStartup(); +// stParser.waitForStartup(); +// stAuth.waitForStartup(); +// stLease.waitForStartup(); +// stStorage.waitForStartup(); +// stReplication.waitForStartup(); +// stPinky.waitForStartup(); +// } catch (Exception exc) { +// Logging.logMessage(Logging.LEVEL_ERROR, this, exc); +// } +// } +// +// /** +// * It gets a request from Pinky and begins its processing. This is the entry +// * point for Pinky +// * +// * @param theRequest +// * The request to proccess. 
+// */ +// public void receiveRequest(PinkyRequest theRequest) { +// // Logging.logMessage(Logging.LEVEL_DEBUG,this,"Received request : " + +// // theRequest.toString()); +// +// OSDRequest rq = new OSDRequest(theRequest); +// rq.tRecv = System.currentTimeMillis(); +// +// generateOSDRequestFromHttperfRequest(theRequest, rq); +// +// stageCallback(rq); +// } +// +// /** +// * shuts down all stages and the HTTP server +// */ +// public void shutdown() { +// stPinky.shutdown(); +// stParser.shutdown(); +// stAuth.shutdown(); +// stLease.shutdown(); +// stStorage.shutdown(); +// stReplication.shutdown(); +// stSpeedy.shutdown(); +// +// try { +// stPinky.waitForShutdown(); +// stParser.waitForShutdown(); +// stAuth.waitForShutdown(); +// stLease.waitForShutdown(); +// stStorage.waitForShutdown(); +// stReplication.waitForShutdown(); +// stSpeedy.waitForStartup(); +// } catch (Exception exc) { +// Logging.logMessage(Logging.LEVEL_ERROR, this, exc); +// } +// } +// +// public OSDId getMe() { +// return me; +// } +// +// public void stageCallback(OSDRequest request) { +// +// request.tParse = System.currentTimeMillis(); +// +// switch (request.getStatus()) { +// +// case AUTHENTICATED: +// +// // check where the request has to be enqueued, depending on the +// // requested operation +// switch (request.getOSDOperation().getOpType()) { +// +// case READ: +// // open operations and fully-synchronous read operations are +// // directly enqueued at the storage stage, while any other +// // read requests first have to be enqueued at the lease stage +///* if (request.getOSDOperation().getOpSubType() == OperationSubType.OPEN +// || (request.getLocations().getRepUpdatePolicy().equals( +// Locations.RUP_SYNC) && request.getLocations() +// .getRepUpdatePolicySyncLevel() == request +// .getLocations().size()))*/ +// stStorage.enqueueRequest(request); +///* else +// stLease.enqueueRequest(request);*/ +// break; +// +// default: +// throw new RuntimeException("illegal request status: 
" +// + request.getStatus()); +// } +// break; +// +// case PERSISTED: +///* if (request.getLocations() != null +// && request.getLocations().size() > 1 +// && ((request.getOSDOperation().getOpType() == OperationType.WRITE && request +// .getLocations().getRepUpdatePolicy().equals( +// Locations.RUP_SYNC)) +// || request.getOSDOperation().getOpType() == OperationType.DELETE || (request +// .getOSDOperation().getOpType() == OperationType.RPC && request +// .getOSDOperation().getOpSubType() == OperationSubType.TRUNCATE))) { +// stReplication.enqueueRequest(request); +// } else {*/ +// response(request); +//// } +// break; +// +// case NOTFOUND: +// case FAILED: +// response(request); +// break; +// +// default: +// throw new RuntimeException("illegal request status: " +// + request.getStatus()); +// } +// } +// +// /** +// * Sends back the client response. +// * +// * @param rq +// * the request +// */ +// private void response(OSDRequest rq) { +// PinkyRequest answer = rq.getRequest(); +// +// // Logging.logMessage(Logging.LEVEL_INFO,this,"stage duration +// // (auth/parse/lease/repl/store): "+ +// // (rq.tAuth-rq.tRecv)+"/"+ +// // (rq.tParse-rq.tRecv)+"/"+ +// // (rq.tLease-rq.tRecv)+"/"+ +// // (rq.tRepl-rq.tRecv)+"/"+ +// // (rq.tStore-rq.tRecv)); +// +// switch (rq.getStatus()) { +// +// case NOTFOUND: +// answer.setResponse(HTTPUtils.SC_NOT_FOUND, rq.getErrorMsg()); +// break; +// +// case PERSISTED: +// if (rq.getData() != null) { +// answer.setResponse(HTTPUtils.SC_OKAY, rq.getData(), rq +// .getDataType()); +// } else { +// String newFileSize = rq.getNewFileSize(); +// if (newFileSize != null) { +// HTTPHeaders newFileSizeHeader = new HTTPHeaders(); +// newFileSizeHeader.addHeader(HTTPHeaders.HDR_XNEWFILESIZE, +// newFileSize.toString()); +// +// answer.setResponse(HTTPUtils.SC_OKAY, null, +// HTTPUtils.DATA_TYPE.JSON, newFileSizeHeader); +// } else { +// answer.setResponse(HTTPUtils.SC_OKAY); +// } +// } +// break; +// +// case FAILED: +// 
answer.setResponse(HTTPUtils.SC_BAD_REQUEST, rq.getErrorMsg()); +// break; +// +// default: +// // This is a bug because the request isn't any of allowed ones +// throw new RuntimeException( +// "The status of the request was not a final one"); +// } +// +// Logging.logMessage(Logging.LEVEL_DEBUG, this, "Processed request : " +// + answer.toString()); +// stPinky.sendResponse(answer); +// } +// +// /** +// * It gets the storageStage of the controller +// * +// * @return It returns the storage stage used by the controller +// */ +// public MultithreadedStorageStage getStorage() { +// return stStorage; +// } +// +// private void generateOSDarrangementsForHttperf(OSDConfig config) { +// op = new OSDOperation(OperationType.READ, +// OperationSubType.WHOLE); +// List osd = new ArrayList(); +// osd.add(new OSDId("localhost", config.getPort(), OSDId.SCHEME_HTTP)); +// sp = new RAID0(1,1); +// loc = new Location(sp, osd); +// ArrayList list = new ArrayList(2); +// list.add(loc); +// locs = new Locations(list); +// } +// +// private void generateOSDRequestFromHttperfRequest(PinkyRequest theRequest, OSDRequest rq) { +// String[] uri = theRequest.requestURI.split("&"); +// String fileId = uri[0]; +// int objNo = Integer.parseInt(uri[1]); +// int version = Integer.parseInt(uri[2]); +// +// rq.setFileId(fileId); +// rq.setObjectNo(objNo); +// rq.setVersionNo(version); +// +// rq.setOSDOperation(op); +// rq.setPolicy(sp); +// rq.setLocation(loc); +// rq.setLocations(locs); +// rq.setCapability(new Capability(rq.getFileId(), "read", "IAmTheClient", 0)); +// rq.setStatus(OSDRequest.Status.AUTHENTICATED); +// } +// +// public void sendUDP(ReusableBuffer data, InetSocketAddress receiver) { +// throw new UnsupportedOperationException("Not supported yet."); +// } +// +// public void receiveUDP(ReusableBuffer data, InetSocketAddress sender) { +// throw new UnsupportedOperationException("Not supported yet."); +// } +// +// public void sendInternalEvent(OSDRequest event) { +// throw new 
UnsupportedOperationException("Not supported yet."); +// } +//} diff --git a/servers/src/org/xtreemfs/sandbox/httperf/HttperfOSD.java b/servers/src/org/xtreemfs/sandbox/httperf/HttperfOSD.java new file mode 100644 index 0000000000000000000000000000000000000000..4e44f6b1e3477b1f032d0b1b6bc99ecffefc65e3 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/httperf/HttperfOSD.java @@ -0,0 +1,180 @@ +/* + * Main.java + * + * Created on 8. Dezember 2006, 10:21 + * + * @author Bjoern Kolbeck, Zuse Institute Berlin (kolbeck@zib.de) + * + */ + +package org.xtreemfs.sandbox.httperf; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.HashMap; +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.json.JSONException; + +import org.xtreemfs.osd.OSDConfig; +import org.xtreemfs.sandbox.httperf.HttPerfRequestController; + +/** + * A sample OSD. 
+ * + * @author bjko + */ +public class HttperfOSD { + + +// private HttPerfRequestController controller; +// +// private HeartbeatThread heartbeatThread = null; +// +// public static final String PROTOCOL = "http://"; +// +// private DIRClient client; +// +// /** +// * Creates a new instance of Main +// */ +// public HttperfOSD(OSDConfig config, OSDId me, boolean dirServiceInUse) { +// try { +// client = new DIRClient(null,config.getDirectoryService()); +// TimeSync.initialize(client,config.getRemoteTimeSync(),config.getLocalClockRenew(),"nullauth bla bla"); +// controller = new HttPerfRequestController(config,me); +// controller.start(); +// +// if (dirServiceInUse) { +// heartbeatThread = new HeartbeatThread(client,me.toString()); +// heartbeatThread.start(); +// } +// } catch (IOException ex) { +// Logging.logMessage(Logging.LEVEL_ERROR,this,ex); +// } +// } +// +// public void shutdown() { +// controller.shutdown(); +// if (heartbeatThread != null) { +// heartbeatThread.shutdown(); +// try { +// heartbeatThread.join(); +// } catch (InterruptedException e) { +// // TODO Auto-generated catch block +// e.printStackTrace(); +// } +// } +// client.shutdown(); +// } +// +// public HttPerfRequestController getController() { +// return controller; +// } +// +// /** +// * Main routine +// * +// * @param args +// * the command line arguments +// */ +// public static void main(String[] args) throws Exception { +// +// String fname = (args.length > 0) ? 
args[0] : "../config/osdconfig.properties"; +// +// OSDConfig config = new OSDConfig(fname); +// +// Logging.start(config.getDebugLevel()); +// +// Thread.currentThread().setName("OSD thr."); +// +// String me = PROTOCOL; +// try { +// me += InetAddress.getLocalHost().getCanonicalHostName(); +// } catch (UnknownHostException ex) { +// Logging.logMessage(Logging.LEVEL_ERROR,null,ex); +// return; +// } +// +// me += ":" + config.getPort(); +// Logging.logMessage(Logging.LEVEL_INFO,null,"my ID is "+me); +// +// new HttperfOSD(config,new OSDId(me, OSDId.SCHEME_HTTP), true); +// } +// +// static class HeartbeatThread extends Thread { +// +// private InetSocketAddress dirserv; +// +// private DIRClient client; +// +// private boolean finished; +// +// private final String uuid; +// +// public HeartbeatThread(DIRClient client, String uuid) { +// super("OSD HB thr."); +// this.client = client; +// this.uuid = uuid; +// } +// +// public void run() { +// +// boolean register = true; +// try { +// //deregister old data +// RPCResponse r = client.deregisterEntity(uuid,"nullauth " + uuid); +// r.waitForResponse(); +// r.freeBuffers(); +// } catch (Exception ex) { +// ex.printStackTrace(); +// } +// +// while (!finished) { +// try { +// +// if (register) { +// // update the timestamp +// RPCResponse response = client.registerEntity(uuid, RPCClient.generateMap("uuid",uuid,"type","OSD","free","1000000"), "nullauth " + uuid); +// response.waitForResponse(); +// response.freeBuffers(); +// register = false; +// Logging.logMessage(Logging.LEVEL_INFO,this,"registered with directory service"); +// } else { +// // update the timestamp +// RPCResponse response = client.registerEntity(uuid, new HashMap(), "nullauth " + uuid); +// response.waitForResponse(); +// response.freeBuffers(); +// Logging.logMessage(Logging.LEVEL_DEBUG,this,"sent heartbeat signal to directory service"); +// } +// +// } catch (IOException ex) { +// Logging.logMessage(Logging.LEVEL_ERROR, this, ex); +// } catch 
(JSONException ex) { +// Logging.logMessage(Logging.LEVEL_ERROR, this, ex); +// } catch (InterruptedException ex) { +// Logging.logMessage(Logging.LEVEL_ERROR, this, ex); +// } +// +// try { +// synchronized (this) { +// this.wait(1 * 60 * 1000); +// } +// } catch (InterruptedException ex) { +// // ignore +// } +// } +// } +// +// public void shutdown() { +// finished = true; +// this.interrupt(); +// } +// } + +} diff --git a/servers/src/org/xtreemfs/sandbox/httperf/HttperfOSDMultithreaded.java b/servers/src/org/xtreemfs/sandbox/httperf/HttperfOSDMultithreaded.java new file mode 100644 index 0000000000000000000000000000000000000000..231334f5ecbe613fef54042d67d1993fa21007be --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/httperf/HttperfOSDMultithreaded.java @@ -0,0 +1,177 @@ +/* + * Main.java + * + * HttPerfRequestControllerMultithreaded.java + * + * Created on December 22, 2007, 17:03 PM + * + * @author Eugenio Cesario + */ + +package org.xtreemfs.sandbox.httperf; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.HashMap; +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.json.JSONException; + +import org.xtreemfs.osd.OSDConfig; +import org.xtreemfs.sandbox.httperf.HttPerfRequestController; + + +public class HttperfOSDMultithreaded { + + +// private HttPerfRequestControllerMultithreaded controller; +// +// private HeartbeatThread heartbeatThread = null; +// +// public static final String PROTOCOL = "http://"; +// +// private DIRClient client; +// +// /** +// * Creates a new instance of Main +// */ +// public HttperfOSDMultithreaded(OSDConfig config, OSDId me, boolean dirServiceInUse) { +// try { +// client = new 
DIRClient(null,config.getDirectoryService()); +// TimeSync.initialize(client,config.getRemoteTimeSync(),config.getLocalClockRenew(),"nullauth bla bla"); +// controller = new HttPerfRequestControllerMultithreaded(config,me); +// controller.start(); +// +// if (dirServiceInUse) { +// heartbeatThread = new HeartbeatThread(client,me.toString()); +// heartbeatThread.start(); +// } +// } catch (IOException ex) { +// Logging.logMessage(Logging.LEVEL_ERROR,this,ex); +// } +// } +// +// public void shutdown() { +// controller.shutdown(); +// if (heartbeatThread != null) { +// heartbeatThread.shutdown(); +// try { +// heartbeatThread.join(); +// } catch (InterruptedException e) { +// // TODO Auto-generated catch block +// e.printStackTrace(); +// } +// } +// client.shutdown(); +// } +// +// public HttPerfRequestControllerMultithreaded getController() { +// return controller; +// } +// +// /** +// * Main routine +// * +// * @param args +// * the command line arguments +// */ +// public static void main(String[] args) throws Exception { +// +// String fname = (args.length > 0) ? 
args[0] : "../config/osdconfig.properties"; +// +// OSDConfig config = new OSDConfig(fname); +// +// Logging.start(config.getDebugLevel()); +// +// Thread.currentThread().setName("OSD thr."); +// +// String me = PROTOCOL; +// try { +// me += InetAddress.getLocalHost().getCanonicalHostName(); +// } catch (UnknownHostException ex) { +// Logging.logMessage(Logging.LEVEL_ERROR,null,ex); +// return; +// } +// +// me += ":" + config.getPort(); +// Logging.logMessage(Logging.LEVEL_INFO,null,"my ID is "+me); +// +// new HttperfOSDMultithreaded(config,new OSDId(me, OSDId.SCHEME_HTTP), true); +// } +// +// static class HeartbeatThread extends Thread { +// +// private InetSocketAddress dirserv; +// +// private DIRClient client; +// +// private boolean finished; +// +// private final String uuid; +// +// public HeartbeatThread(DIRClient client, String uuid) { +// super("OSD HB thr."); +// this.client = client; +// this.uuid = uuid; +// } +// +// public void run() { +// +// boolean register = true; +// try { +// //deregister old data +// RPCResponse r = client.deregisterEntity(uuid,"nullauth bla"); +// r.waitForResponse(); +// r.freeBuffers(); +// } catch (Exception ex) { +// ex.printStackTrace(); +// } +// +// while (!finished) { +// try { +// +// if (register) { +// // update the timestamp +// RPCResponse response = client.registerEntity(uuid, RPCClient.generateMap("uuid",uuid,"type","OSD","free","1000000"), "nullauth bla"); +// response.waitForResponse(); +// response.freeBuffers(); +// register = false; +// Logging.logMessage(Logging.LEVEL_INFO,this,"registered with directory service"); +// } else { +// // update the timestamp +// RPCResponse response = client.registerEntity(uuid, new HashMap(), "nullauth bla"); +// response.waitForResponse(); +// response.freeBuffers(); +// Logging.logMessage(Logging.LEVEL_DEBUG,this,"sent heartbeat signal to directory service"); +// } +// +// } catch (IOException ex) { +// Logging.logMessage(Logging.LEVEL_ERROR, this, ex); +// } catch 
(JSONException ex) { +// Logging.logMessage(Logging.LEVEL_ERROR, this, ex); +// } catch (InterruptedException ex) { +// Logging.logMessage(Logging.LEVEL_ERROR, this, ex); +// } +// +// try { +// synchronized (this) { +// this.wait(1 * 60 * 1000); +// } +// } catch (InterruptedException ex) { +// // ignore +// } +// } +// } +// +// public void shutdown() { +// finished = true; +// this.interrupt(); +// } +// } + +} diff --git a/servers/src/org/xtreemfs/sandbox/httperf/WriteFilesToOSD.java b/servers/src/org/xtreemfs/sandbox/httperf/WriteFilesToOSD.java new file mode 100644 index 0000000000000000000000000000000000000000..64013c1a9e0d782528645be28ebe566cbef9b157 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/httperf/WriteFilesToOSD.java @@ -0,0 +1,203 @@ +package org.xtreemfs.sandbox.httperf; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.Random; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.HttpErrorException; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.osd.OSDClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.common.striping.RAID0; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.speedy.MultiSpeedy; + +/** + * writes the files to the OSD which httperf uses for tests later + * @author clorenz + */ +public 
class WriteFilesToOSD { + + private final long STRIPE_SIZE = 2; + + private Locations loc; + private OSDClient client; + private String capSecret; + InetSocketAddress osdAddr; + + ReusableBuffer buf; + + public WriteFilesToOSD(String host, int port, int filesize, String capSecret) throws IOException{ + Logging.start(Logging.LEVEL_DEBUG); + + ServiceUUID serverID = new ServiceUUID("http://"+java.net.InetAddress.getLocalHost().getCanonicalHostName()+":"+port); + osdAddr = new InetSocketAddress(host,port); + + List locations = new ArrayList(1); + StripingPolicy sp = new RAID0(STRIPE_SIZE,1); + List osd = new ArrayList(1); + osd.add(serverID); + locations.add(new Location(sp,osd)); + loc = new Locations(locations); + + buf = ReusableBuffer.wrap(generateRandomBytes(filesize)); + + MultiSpeedy speedy = new MultiSpeedy(); + speedy.start(); + client = new OSDClient(speedy); + this.capSecret = capSecret; + } + + public void writeFiles(String filename) throws NumberFormatException, HttpErrorException, IOException, JSONException, InterruptedException{ + BufferedReader file = new BufferedReader(new InputStreamReader(new FileInputStream(filename))); + + String fileId; + Capability cap; + int objNo; + String[] uri; + + String line; + while((line = file.readLine()) != null) { + line = line.trim(); + if(line.length()==0){ // whitespace line + continue; + } + uri = line.split("&"); + fileId = uri[0]; + objNo = Integer.parseInt(uri[1]); + + cap = new Capability(fileId,"DebugCapability",0, capSecret); + + RPCResponse r = client.put(osdAddr,loc,cap,fileId,objNo,buf); + r.waitForResponse(); + r.freeBuffers(); + } + file.close(); + } + + private void init(String filename, int fileAmount, int maxObjAmount, int burstLength){ + Random rand = new Random(); + LinkedList requests = new LinkedList(); + + String fileId; + int objects; + // TODO: better algorithm for sessions + for (int i = 0; i < fileAmount; i++) { + fileId = generateFileId(rand); + objects = rand.nextInt(maxObjAmount); + 
for(int objNo=0; objNo burstLength){ +// writeLine =" \n"; + curBurstLength = -1; + continue; + }else{ + writeLine = " "+line+" \n"; + } + output.write(writeLine); + curBurstLength++; + } + output.flush(); + output.close(); + } catch (FileNotFoundException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } catch (IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + + private void cleanup(){ + this.client.getSpeedy().shutdown(); + } + + public static void main(String[] args){ + if(args.length >= 4){ + String host = args[0]; + int port = Integer.parseInt(args[1]); + String fileWithFilenames = args[2]; + String filesize = args[3]; + String capSecret = args[4]; + + WriteFilesToOSD writer = null; + try { + writer = new WriteFilesToOSD(host,port,Integer.parseInt(filesize), capSecret); + if(args.length==8 && args[4].equals("init")){ + int fileAmount = Integer.parseInt(args[5]); + int maxObjAmount = Integer.parseInt(args[6]); + int burstLength = Integer.parseInt(args[7]); + writer.init(fileWithFilenames, fileAmount, maxObjAmount, burstLength); + } + writer.writeFiles(fileWithFilenames); + } catch (NullPointerException e) { + System.out.println("usage: HttPerfRequestController [init ]"); + } catch (IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } catch (NumberFormatException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } catch (JSONException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } catch (InterruptedException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + }finally{ + if(writer!=null) writer.cleanup(); + } + }else System.out.println("usage: HttPerfRequestController [init ]"); + } + + /** + * generates randomly filled byte-array + * + * @param length + * length of the byte-array + */ + public static byte[] generateRandomBytes(int length) { + Random r = new Random(15619681); + byte[] bytes = new byte[length]; + + 
r.nextBytes(bytes); + return bytes; + } + + /** + * generates randomly Filename + */ + public static String generateFileId(Random r) throws IllegalArgumentException { + String id = r.nextInt(100000000) + ":" + r.nextInt(1000000000); + return id; + } + +} diff --git a/servers/src/org/xtreemfs/sandbox/httperf/start_httperf b/servers/src/org/xtreemfs/sandbox/httperf/start_httperf new file mode 100755 index 0000000000000000000000000000000000000000..ae2353fa0bc7bd674090099f384d93be21389e7a --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/httperf/start_httperf @@ -0,0 +1,40 @@ +#!/bin/bash + +##################################################################################### +# +# This script will start the client-side of the Httperf test. +# It starts httperf. +# +# At first, please start the OSD and DIR (it's recommended to use the start_osd +# script). +# Now the user can start with the tests. +# +# The "*files_urls"-files contain the special URL-formatting which is used by +# the WriteFilesToOSD-class and httperf. +# It is necessary that you use the same "*files_urls"-file for the +# WriteFilesToOSD-class and httperf. 
+# +# You need to specify the following parameters for this script: +# URLS_FILE: PATH to the "*files_urls"-file +# OSD_SERVER: url to the OSD +# OSD_PORT: OSD port (the start_osd script ueses port 32637) +# CONNECTIONS: concurrent connections (httperf param) +# TIMEOUT: reply-timeout (httperf param) +# +##################################################################################### + +URLS_FILE=$1 +OSD_SERVER=$2 +OSD_PORT=$3 +CONNECTIONS=$4 +TIMEOUT=$5 + +httperf --hog --wsesslog=$CONNECTIONS,2,$URLS_FILE --server=$OSD_SERVER --port=$OSD_PORT --http-version=1.1 --print-request --timeout=$TIMEOUT + +usage() { + cat < + +EOF +} diff --git a/servers/src/org/xtreemfs/sandbox/httperf/start_osd b/servers/src/org/xtreemfs/sandbox/httperf/start_osd new file mode 100755 index 0000000000000000000000000000000000000000..08c2de4ce0bc546f0f706402080c3f22e4ed02b4 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/httperf/start_osd @@ -0,0 +1,95 @@ +#!/bin/bash + +##################################################################################### +# +# This script will start the server-side of the Httperf test. +# It starts the DIR and the OSD. +# +# At first it starts the DIR and the original OSD. The orininal OSD is needet for +# creating files at the OSD. Therefor the WriteFilesToOSD-class is necessary. +# After the creation-process the old OSD will be killed and the special +# HttperfOSD will be started. +# Now the user can start with the tests. +# +# The "*files_urls"-files contain the special URL-formatting which is used by +# the WriteFilesToOSD-class and httperf. +# It is necessary that you use the same "*files_urls"-file for the +# WriteFilesToOSD-class and httperf. 
+# +# You need to specify the following parameters for this script: +# OSD_FILEPATH: absolute path where OSD saves the files +# URLS_FILE: absolute path to the "*files_urls"-file +# FILESIZE: filesize of the files which will be created +# +##################################################################################### + +$dir_pid +$osd_pid +OSD_FILEPATH=$1 +URLS_FILE=$2 +FILESIZE=$3 + +cd $XTREEMFS +mkdir /tmp/xtreemfs_httperf_test/ + +# start DIR +echo "start DIR" +bin/xtreemfs_start ds -p 32638 -i -d localhost -c /tmp/xtreemfs_httperf_test/ds.cfg -s /tmp/xtreemfs_httperf_test/ds -l /tmp/xtreemfs_httperf_test/ds.log & +dir_pid=$! + +sleep 10 + +# start original OSD +echo "start OSD" +bin/xtreemfs_start osd -p 32637 -i -d localhost -c /tmp/xtreemfs_httperf_test/osd.cfg -s $OSD_FILEPATH & +osd_pid=$! + +sleep 10 + +cd java/build/classes +# start writing files +echo "start writing files" +java org.xtreemfs.sandbox.httperf.WriteFilesToOSD localhost 32637 $URLS_FILE $FILESIZE + +sleep 10 + +# kill original OSD +echo "kill OSD" +if [ -d /proc/$osd_pid ]; then + echo "Killing process $dir_pid" + kill -9 $osd_pid +fi + +sleep 1 + +# start adapted OSD for httperf +echo "start HttPerfOSD" +java org.xtreemfs.sandbox.httperf.HttperfOSD /tmp/xtreemfs_httperf_test/osd.cfg & +osd_pid=$! 
+ +sleep 10 +echo +echo "services are running" +echo "press a button to kill all services" +read key +if [ -d /proc/$osd_pid ]; then + echo "Killing process $osd_pid" + kill -9 $osd_pid +fi +if [ -d /proc/$dir_pid ]; then + echo "Killing process $dir_pid" + kill -9 $dir_pid +fi +echo "services stopped" + +cd ../../src/org/xtreemfs/sandbox/httperf + + + +usage() { + cat < + +EOF +} diff --git a/servers/src/org/xtreemfs/sandbox/httperf/start_osd_multithreaded b/servers/src/org/xtreemfs/sandbox/httperf/start_osd_multithreaded new file mode 100644 index 0000000000000000000000000000000000000000..87c3a434cf3cda954207e0848ee0ba26c2cbee4a --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/httperf/start_osd_multithreaded @@ -0,0 +1,116 @@ +#!/bin/bash + +##################################################################################### +# Test for multithreaded OSD +# This script will start the server-side of the Httperf test. +# It starts the DIR and the OSD. +# +# At first it starts the DIR and the original OSD. The orininal OSD is needed for +# creating files at the OSD. Therefor the WriteFilesToOSD-class is necessary. +# After the creation-process the old OSD will be killed and the special +# HttperfOSD will be started. +# Now the user can start with the tests. +# +# The "*files_urls"-files contain the special URL-formatting which is used by +# the WriteFilesToOSD-class and httperf. +# It is necessary that you use the same "*files_urls"-file for the +# WriteFilesToOSD-class and httperf. 
+# +# You need to specify the following parameters for this script: +# OSD_FILEPATH: absolute path where OSD saves the files +# URLS_FILE: absolute path to the "*files_urls"-file +# FILESIZE: filesize of the files which will be created +# +##################################################################################### + +$dir_pid +$osd1_pid +$osd2_pid +OSD_FILEPATH=$1 +URLS_FILE=$2 +FILESIZE=$3 + +rm -r /tmp/xtreemfs_httperf_test/ +mkdir /tmp/xtreemfs_httperf_test/ + +# start DIR +echo "Starting Directory Service..." +$XTREEMFS/bin/xtreemfs_start ds -p 32638 -i -d http://localhost:32638 -c /tmp/xtreemfs_httperf_test/ds.cfg -s /tmp/xtreemfs_httperf_test/ds -l /tmp/xtreemfs_httperf_test/ds.log & +dir_pid=$! +echo "...Directory Service started (process Id = $dir_pid)." + +echo +sleep 10 + +# start original OSD +echo "Starting OSD_1..." +$XTREEMFS/bin/xtreemfs_start osd -p 32637 -i -d http://localhost:32638 -c /tmp/xtreemfs_httperf_test/osd.cfg -s $OSD_FILEPATH & +osd1_pid=$! +echo "...OSD_1 started (process Id = $osd1_pid)." + +echo +sleep 10 + +cd java/build/classes +# start writing files +#rm -r $XTREEMFS/osdFilePath +echo "Starting writing files..." +$JAVA_HOME/bin/java org.xtreemfs.sandbox.httperf.WriteFilesToOSD localhost 32637 $URLS_FILE $FILESIZE +echo "...writing files completed." + +echo +sleep 10 + +# kill original OSD +echo "kill OSD_1" +if [ -d /proc/$osd1_pid ]; then + echo "Killing process $osd1_pid" + kill $osd1_pid +fi + +echo +sleep 10 + +# start a second OSD +#echo "start second OSD" +#$XTREEMFS/bin/xtreemfs_start osd -p 32639 -i -d http://localhost:32638 -c /tmp/xtreemfs_httperf_test/osd2.cfg -s $OSD_FILEPATH & +# + +#start adapted OSD (using MultithreadedStorageStage) for httperf +#echo "start HttPerfOSD" +#$JAVA_HOME/bin/java org.xtreemfs.sandbox.httperf.HttperfOSDMultithreaded /tmp/xtreemfs_httperf_test/osd.cfg & +#osd2_pid=$! 
+ + +sleep 10 + +echo + +echo "services are running" +echo "press a button to kill all services" +read key + +#if [ -d /proc/$osd2_pid ]; then +# echo "Killing process $osd2_pid" +# kill $osd2_pid +#fi + +#kill Directory Service +echo "kill Directory Service (process Id = $dir_pid)" +if [ -d /proc/$dir_pid ]; then + echo "Killing process $dir_pid" + kill $dir_pid +fi +echo "services stopped" + +#cd ../../src/org/xtreemfs/sandbox/httperf + + + +usage() { + cat < + +EOF +} diff --git a/servers/src/org/xtreemfs/sandbox/org_xtreemfs_sandbox_DirectIOReader.c b/servers/src/org/xtreemfs/sandbox/org_xtreemfs_sandbox_DirectIOReader.c new file mode 100644 index 0000000000000000000000000000000000000000..c15052a8768690f2b3bc36c0c58d00b5ff9b0dc6 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/org_xtreemfs_sandbox_DirectIOReader.c @@ -0,0 +1,40 @@ +#include "org_xtreemfs_sandbox_DirectIOReader.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +JNIEXPORT jobject JNICALL Java_org_xtreemfs_sandbox_DirectIOReader_loadFile + (JNIEnv *env, jobject jobj, jstring name) + +{ + void* m; + jobject jb; + jboolean iscopy; + struct stat finfo; + const char *mfile = (*env)->GetStringUTFChars( + env, name, &iscopy); + int fd = open(mfile, O_RDONLY | 040000); + if(!fd) + printf("could not open file"); + + lstat(mfile, &finfo); + m = valloc(finfo.st_size); + + int c; + c = read(fd, m, finfo.st_size); + if(c != finfo.st_size) + printf("read wrong object size"); + + jb=(*env)->NewDirectByteBuffer(env, m, finfo.st_size); + close(fd); + (*env)->ReleaseStringUTFChars(env, name, mfile); + return (jb); + +} diff --git a/servers/src/org/xtreemfs/sandbox/org_xtreemfs_sandbox_DirectIOReader.h b/servers/src/org/xtreemfs/sandbox/org_xtreemfs_sandbox_DirectIOReader.h new file mode 100644 index 0000000000000000000000000000000000000000..992bfbd0aef057b06188775b88b5f80c8421a262 --- /dev/null +++ 
b/servers/src/org/xtreemfs/sandbox/org_xtreemfs_sandbox_DirectIOReader.h @@ -0,0 +1,21 @@ +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +/* Header for class org_xtreemfs_sandbox_DirectIOReader */ + +#ifndef _Included_org_xtreemfs_sandbox_DirectIOReader +#define _Included_org_xtreemfs_sandbox_DirectIOReader +#ifdef __cplusplus +extern "C" { +#endif +/* + * Class: org_xtreemfs_sandbox_DirectIOReader + * Method: loadFile + * Signature: (Ljava/lang/String;)[B + */ +JNIEXPORT jobject JNICALL Java_org_xtreemfs_sandbox_DirectIOReader_loadFile + (JNIEnv *, jclass, jstring); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/servers/src/org/xtreemfs/sandbox/tests/CreateConfig.java b/servers/src/org/xtreemfs/sandbox/tests/CreateConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..32e6ac3f59cff66ac014e75b11147950671f729b --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/tests/CreateConfig.java @@ -0,0 +1,60 @@ +package org.xtreemfs.sandbox.tests; + +import java.io.FileOutputStream; +import java.util.Properties; + +public class CreateConfig { + + public static void main(String[] args) { + + try { + + int numCfgFiles = Integer.parseInt(args[0]); + + Properties config = new Properties(); + if (!args[1].equals("null")) + config.setProperty(OSDTestClient.Configuration.PROP_STRIPESIZE, + args[1]); + if (!args[2].equals("null")) + config.setProperty(OSDTestClient.Configuration.PROP_NUM_REQUESTS, + args[2]); + if (!args[3].equals("null")) + config.setProperty(OSDTestClient.Configuration.PROP_NUM_OBJECTS, + args[3]); + if (!args[4].equals("null")) + config.setProperty(OSDTestClient.Configuration.PROP_POLICY, + args[4]); + if (!args[5].equals("null")) + config.setProperty(OSDTestClient.Configuration.PROP_FILE_ID, + args[5]); + if (!args[6].equals("null")) + config.setProperty(OSDTestClient.Configuration.PROP_OPERATION, + args[6]); + + for (int i = 0; i < numCfgFiles; i++) + config.setProperty(OSDTestClient.Configuration.PROP_OSD + (i 
+ 1), + "opt" + ((i + 1) < 10 ? ("0" + (i + 1)) : (i + 1)) + + ":32640"); + + for (int i = 0; i < numCfgFiles; i++) { + config.setProperty(OSDTestClient.Configuration.PROP_TARGET_OSD, + (i + 1) + ""); + if (!args[7].equals("null")) { + long delay = Long.parseLong(args[7]) - i * 1000; + config.setProperty( + OSDTestClient.Configuration.PROP_INIT_DELAY, (delay < 0 ? 0 + : delay) + ""); + } + + config.store(new FileOutputStream("/home/stender/config" + + ((i + 1) < 10 ? ("0" + (i + 1)) : (i + 1)) + + ".properties"), ""); + } + + } catch (Exception ex) { + ex.printStackTrace(); + System.exit(1); + } + + } +} diff --git a/servers/src/org/xtreemfs/sandbox/tests/MRCStressTest.java b/servers/src/org/xtreemfs/sandbox/tests/MRCStressTest.java new file mode 100644 index 0000000000000000000000000000000000000000..48cd8279267b273adb673b9bfbaff0bab66d4519 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/tests/MRCStressTest.java @@ -0,0 +1,115 @@ +package org.xtreemfs.sandbox.tests; + +import java.io.File; +import java.io.IOException; + +public class MRCStressTest { + + private static int fc = 0; + + private static int dc = 0; + + private static final Object fcLock = new Object(); + + private static final Object dcLock = new Object(); + + public static void main(String[] args) throws Exception { + + final String rootDir = "/tmp/xtreemfs"; + final int numberOfThreads = 30; + final int depth = 4; + final int minSpread = 2; + final int maxSpread = 5; + final int minFilesPerDir = 0; + final int maxFilesPerDir = 10; + final int minNameLength = 1; + final int maxNameLength = 32; + + long startTime = System.currentTimeMillis(); + + Thread[] threads = new Thread[numberOfThreads]; + for (int i = 0; i < numberOfThreads; i++) + threads[i] = new Thread() { + public void run() { + try { + createRandomTree(rootDir, depth, minSpread, maxSpread, + minFilesPerDir, maxFilesPerDir, minNameLength, + maxNameLength); + } catch (IOException e) { + e.printStackTrace(); + } + } + }; + + for (Thread th 
: threads) + th.start(); + + for (Thread th : threads) + th.join(); + + long time = System.currentTimeMillis() - startTime; + System.out.println("created " + fc + " files and " + dc + + " directories in " + time + " ms"); + } + + public static void createRandomTree(String rootDir, int depth, + int minSpread, int maxSpread, int minFilesPerNode, int maxFilesPerNode, + int minNameLength, int maxNameLength) throws IOException { + + int spread = randomNumber(minSpread, maxSpread); + for (int i = 0; i < spread; i++) { + + // create the node + String nestedDir = rootDir + "/" + + randomFileName(minNameLength, maxNameLength); + if (new File(nestedDir).mkdir()) + synchronized (dcLock) { + dc++; + } + else + System.err.println("could not create directory " + nestedDir); + + // create nested files + int fileCount = randomNumber(minFilesPerNode, maxFilesPerNode); + for (int j = 0; j < fileCount; j++) { + String fileName = nestedDir + "/" + + randomFileName(minNameLength, maxNameLength); + + if (new File(fileName).createNewFile()) + synchronized (fcLock) { + fc++; + } + else + System.err.println("could not create file " + nestedDir); + + } + + // create subtree + if (depth > 1) + createRandomTree(nestedDir, depth - 1, minSpread, maxSpread, + minFilesPerNode, maxFilesPerNode, minNameLength, + maxNameLength); + } + } + + private static int randomNumber(int lowerBound, int upperBound) { + return (int) (Math.random() * (upperBound - lowerBound + 1) + lowerBound); + } + + private static String randomFileName(int minLength, int maxLength) { + + final char[] allowedChars = { '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', + 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', + 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', + 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', + 'y', 'z' }; + + int length = randomNumber(minLength, maxLength); + char[] chars = new char[length]; + for 
(int i = 0; i < chars.length; i++) + chars[i] = allowedChars[(int) (Math.random() * allowedChars.length)]; + + return new String(chars); + } +} diff --git a/servers/src/org/xtreemfs/sandbox/tests/OSDTestClient.java b/servers/src/org/xtreemfs/sandbox/tests/OSDTestClient.java new file mode 100644 index 0000000000000000000000000000000000000000..c5dee1eef70c7fda3586be5f9eafd29094598daa --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/tests/OSDTestClient.java @@ -0,0 +1,466 @@ +package org.xtreemfs.sandbox.tests; + +import java.io.BufferedReader; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.HttpErrorException; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.RPCResponseListener; +import org.xtreemfs.common.clients.osd.OSDClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.common.striping.RAID0; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.foundation.pinky.HTTPHeaders; + +public class OSDTestClient { + + class ThroughputMonitor extends Thread { + + private ResponseCollector rc; + + private boolean shutdown; + + ThroughputMonitor(ResponseCollector rc) { + this.rc = rc; + } + + public void run() { + + long t = System.currentTimeMillis(); + long resp = rc.getNumberOfResponses(); + + System.out.println("avrg. 
# ops/s:"); + while (!shutdown) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + long oldResp = resp; + long oldT = t; + resp = rc.getNumberOfResponses(); + t = System.currentTimeMillis(); + + double rate = (double) (resp - oldResp) * 1000 / (t - oldT); + + System.out.println(rate + ";" + resp + ";" + rc.getFailures() + + ";" + rc.getRedirects()); + + if (rc.isDone()) { + shutdown = true; + System.out.println("failure rate: " + rc.getFailureRate() + + " (" + rc.getFailures() + " failures)"); + System.out.println("redirects: " + rc.getRedirects()); + System.out.println("total time: " + rc.getTotalTime()); + } + } + } + } + + class ResponseCollector implements RPCResponseListener { + + private int responded; + + private int failed; + + private int redirected; + + private long startTime; + + private long endTime; + + private Configuration config; + + public ResponseCollector(Configuration config) { + this.responded = 0; + this.failed = 0; + this.redirected = 0; + this.config = config; + } + + public void responseAvailable(RPCResponse response) { + + if (startTime == 0) + startTime = System.currentTimeMillis(); + + try { + + try { + + // check if an exception has occured + response.waitForResponse(); + + Object[] context = (Object[]) response.getAttachment(); + // assert body size for read requests + assert (context.length != 4 || response.getBody() != null + && response.getBody().capacity() == config.stripeSize + * KB); + + } catch (HttpErrorException exc) { + + // handle redirect + if (exc.getStatusCode() >= 300 && exc.getStatusCode() < 400) { + String target = response.getSpeedyRequest().responseHeaders + .getHeader(HTTPHeaders.HDR_LOCATION); + assert (target != null); + Logging.logMessage(Logging.LEVEL_INFO, this, + "redirect to " + target); + + // get the request context + Object[] context = (Object[]) response.getAttachment(); + Locations loc = (Locations) context[0]; + Capability cap = (Capability) context[1]; + 
int obj = (Integer) context[2]; + String fileId = (String) context[3]; + ReusableBuffer data = null; + if (context.length == 5) + data = (ReusableBuffer) context[4]; + + int ind = target.lastIndexOf(':'); + InetSocketAddress osd = new InetSocketAddress(target + .substring("http://".length(), ind), Integer + .parseInt(target.substring(ind + 1))); + + // redirect request + RPCResponse newRes = data == null ? client.get(osd, + loc, cap, fileId, obj) : client.put(osd, loc, cap, + fileId, obj, data.createViewBuffer()); + newRes.setAttachment(context); + newRes.setResponseListener(this); + + redirected++; + return; + } + + failed++; + exc.printStackTrace(); + + } catch (IOException exc) { + exc.printStackTrace(); + failed++; + } finally { + response.freeBuffers(); + } + + } catch (Exception e) { + e.printStackTrace(); + } + + responded++; + if (responded >= config.numRequests) + endTime = System.currentTimeMillis(); + } + + public boolean isDone() { + return responded == config.numRequests; + } + + public int getNumberOfResponses() { + return responded; + } + + public double getFailureRate() { + return (double) failed / responded; + } + + public int getFailures() { + return failed; + } + + public int getRedirects() { + return redirected; + } + + public long getTotalTime() { + return endTime - startTime; + } + + } + + class Configuration { + + public static final String PROP_OSD = "osd"; + + public static final String PROP_STRIPESIZE = "stripeSize"; + + public static final String PROP_NUM_REQUESTS = "numRequests"; + + public static final String PROP_NUM_OBJECTS = "numObjects"; + + public static final String PROP_FIRST_OBJECT = "firstObject"; + + public static final String PROP_STEP_SIZE = "stepSize"; + + public static final String PROP_POLICY = "policy"; + + public static final String PROP_FILE_ID = "fileId"; + + public static final String PROP_TARGET_OSD = "targetOSD"; + + public static final String PROP_OPERATION = "operation"; + + public static final String 
PROP_INIT_DELAY = "initialDelay"; + + public static final String SEQ_READ = "sequentialRead"; + + public static final String RND_READ = "randomRead"; + + public static final String SEQ_WRITE = "sequentialWrite"; + + public static final String RND_WRITE = "randomWrite"; + + public List osds; + + public int stripeSize; + + public int numRequests; + + public int numObjects; + + public String policy; + + public String fileId; + + public int targetOSD; + + public String operation; + + public long initialDelay; + + public int firstObject; + + public int stepSize; + + public Configuration() throws IOException { + this(null); + + osds.add(new InetSocketAddress("csr-pc24.zib.de", 32640)); + // osds.add(new InetSocketAddress("csr-pc24.zib.de", 32641)); + osds.add(new InetSocketAddress("xtreem.zib.de", 32637)); + // osds.add(new InetSocketAddress("opt.csc.ncsu.edu", 32637)); + } + + public Configuration(String file) throws IOException { + + Properties props = new Properties(); + if (file != null) + props.load(new FileInputStream(file)); + + // parse the OSD list + osds = new ArrayList(); + for (int i = 1;; i++) { + + if (!props.containsKey(PROP_OSD + i)) + break; + + String osd = props.getProperty(PROP_OSD + i); + int colon = osd.lastIndexOf(':'); + String host = osd.substring(0, colon); + int port = Integer.parseInt(osd.substring(colon + 1)); + osds.add(new InetSocketAddress(host, port)); + } + + stripeSize = Integer.parseInt(props.getProperty(PROP_STRIPESIZE, + "4")); + numRequests = Integer.parseInt(props.getProperty(PROP_NUM_REQUESTS, + "20000")); + numObjects = Integer.parseInt(props.getProperty(PROP_NUM_OBJECTS, + "1000")); + firstObject = Integer.parseInt(props.getProperty(PROP_FIRST_OBJECT, + "0")); + stepSize = Integer.parseInt(props.getProperty(PROP_STEP_SIZE, "1")); + policy = props.getProperty(PROP_POLICY, "lazy"); + fileId = props.getProperty(PROP_FILE_ID, Long.toHexString( + System.currentTimeMillis() / 1000).toUpperCase() + + ":1"); + targetOSD = 
Integer.parseInt(props + .getProperty(PROP_TARGET_OSD, "0")) - 1; + operation = props.getProperty(PROP_OPERATION, RND_WRITE); + initialDelay = Long.parseLong(props.getProperty(PROP_INIT_DELAY, + "-1")); + } + + public String toString() { + + StringBuffer buf = new StringBuffer(); + buf.append(" OSD list:\n"); + for (InetSocketAddress osd : osds) + buf.append(" " + osd + "\n"); + + buf.append(" operation: " + operation + "\n"); + buf.append("number of requests: " + numRequests + "\n"); + buf.append(" number of objects: " + numObjects + "\n"); + buf.append(" first object: " + firstObject + "\n"); + buf.append(" stepSize: " + stepSize + "\n"); + buf.append(" target OSD: " + + (targetOSD == -1 ? "random" : osds.get(targetOSD)) + "\n"); + buf.append(" update policy: " + policy + "\n"); + buf.append(" file ID: " + fileId + "\n"); + buf.append(" stripe size: " + stripeSize + "\n"); + buf.append(" initial delay: " + initialDelay + "\n"); + + return buf.toString(); + } + } + + private static final int KB = 1024; + + private static final byte PATTERN1 = (byte) 'X'; + + private static final byte PATTERN2 = (byte) 'Y'; + + private static final long TIMEOUT = 5000; + + private final OSDClient client; + + private final Configuration config; + + public OSDTestClient(String configFile) throws Exception { + this.client = new OSDClient(null); + this.config = configFile == null ? 
new Configuration() + : new Configuration(configFile); + System.out.println(config.toString()); + } + + public void testRead(Configuration config) throws Exception { + + Capability cap = new Capability(config.fileId, "rw", System + .currentTimeMillis() + 1000 * 60 * 60, 0, "secretPassphrase"); + + // create a locations list with the given replication policy + Locations loc = createLocations(config); + + ResponseCollector rc = new ResponseCollector(config); + ThroughputMonitor calc = new ThroughputMonitor(rc); + calc.start(); + + // perform reads + for (int i = 0; i < config.numRequests; i++) { + + InetSocketAddress osd = config.targetOSD == -1 ? config.osds + .get((int) (Math.random() * config.osds.size())) + : config.osds.get(config.targetOSD); + int obj = config.operation.equals(Configuration.SEQ_READ) ? (config.firstObject + i + * config.stepSize) + % config.numObjects + : (int) (Math.random() * config.numObjects); + + RPCResponse response = client + .get(osd, loc, cap, config.fileId, obj); + response + .setAttachment(new Object[] { loc, cap, obj, config.fileId }); + response.setResponseListener(rc); + } + + calc.join(); + } + + public void testWrite(Configuration config) throws Exception { + + Capability cap = new Capability(config.fileId, "rw", System + .currentTimeMillis() + 1000 * 60 * 60, 0, "secretPassphrase"); + + // create a locations list with the given replication policy + Locations loc = createLocations(config); + + ResponseCollector rc = new ResponseCollector(config); + ThroughputMonitor calc = new ThroughputMonitor(rc); + calc.start(); + + // perform writes + for (int i = 0; i < config.numRequests; i++) { + + ReusableBuffer buf = allocateAndFillBuffer(config.stripeSize, + PATTERN1); + + InetSocketAddress osd = config.targetOSD == -1 ? config.osds + .get((int) (Math.random() * config.osds.size())) + : config.osds.get(config.targetOSD); + int obj = config.operation.equals(Configuration.SEQ_WRITE) ? 
(config.firstObject + i + * config.stepSize) + % config.numObjects + : (int) (Math.random() * config.numObjects); + + RPCResponse response = client.put(osd, loc, cap, config.fileId, + obj, buf); + response.setAttachment(new Object[] { loc, cap, obj, config.fileId, + buf }); + response.setResponseListener(rc); + } + + calc.join(); + } + + public void runTest() throws Exception { + + BufferedReader r = new BufferedReader(new InputStreamReader(System.in)); + + if (config.initialDelay == -1) { + System.out.println("press ENTER to start test run"); + r.readLine(); + } else { + Thread.sleep(config.initialDelay); + } + + if (config.operation.equals(Configuration.RND_READ) + || config.operation.equals(Configuration.SEQ_READ)) + testRead(config); + else if (config.operation.equals(Configuration.RND_WRITE) + || config.operation.equals(Configuration.SEQ_WRITE)) + testWrite(config); + else + System.err.println("invalid operation: " + config.operation); + + client.shutdown(); + client.waitForShutdown(); + } + + private Locations createLocations(Configuration config) { + List locations = new ArrayList(config.osds.size()); + for (InetSocketAddress addr : config.osds) { + StripingPolicy sp = new RAID0(config.stripeSize, 1); + List osd = new ArrayList(1); + osd.add(new ServiceUUID("http://"+addr.getHostName()+":"+addr.getPort())); + locations.add(new Location(sp, osd)); + } + return new Locations(locations, 0, config.policy, 0); + } + + private static ReusableBuffer allocateAndFillBuffer(int stripeSize, + byte pattern) { + ReusableBuffer buf = BufferPool.allocate(stripeSize * KB); + for (int i = 0; i < stripeSize * KB; i++) + buf.put(pattern); + + return buf; + } + + public static void main(String[] args) { + + try { + Logging.start(Logging.LEVEL_ERROR); + OSDTestClient client = new OSDTestClient(args.length == 0 ? 
null + : args[0]); + client.runTest(); + + } catch (Exception ex) { + ex.printStackTrace(); + System.exit(1); + } + + } + +} diff --git a/servers/src/org/xtreemfs/sandbox/tests/ReplicatedOSDTest.java b/servers/src/org/xtreemfs/sandbox/tests/ReplicatedOSDTest.java new file mode 100644 index 0000000000000000000000000000000000000000..6c6dc72d83a40e25702d7fb35b622257ac52b68c --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/tests/ReplicatedOSDTest.java @@ -0,0 +1,260 @@ +/* + * ReplicatedOSDTest.java + * + * Created on August 9, 2007, 10:38 AM + * + * To change this template, choose Tools | Template Manager + * and open the template in the editor. + */ + +package org.xtreemfs.sandbox.tests; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.osd.OSDClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.common.striping.RAID0; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.foundation.json.JSONException; + +/** + * This test requires a manual setup of two OSDs. Several access + * patterns are simulated to trigger data replication. 
+ * Data consistency is checked + * @author bjko + */ +public class ReplicatedOSDTest { + + public final String FILE_ID; + + public static final int STRIPE_SIZE = 4; + + public static final int KB = 1024; + + public static final int WAIT_FOR_LEASE_RETURN = 6000; + + private final List osds; + + private final Capability cap; + + private final OSDClient client; + + private final Locations loc; + + private static final long RESPONSE_TO = 5000; + + /** Creates a new instance of ReplicatedOSDTest */ + public ReplicatedOSDTest(List osds) throws IOException, JSONException { + + this.FILE_ID = Long.toHexString(System.currentTimeMillis()/1000).toUpperCase()+":1"; + Logging.logMessage(Logging.LEVEL_INFO,this,"file id is "+FILE_ID); + + this.osds = osds; + client = new OSDClient(null); + cap = new Capability(FILE_ID,"rw",System.currentTimeMillis()+1000*60*60,0,"secretPassphrase"); + + List locations = new ArrayList(osds.size()); + for (InetSocketAddress addr : osds) { + StripingPolicy sp = new RAID0(STRIPE_SIZE,1); + List osd = new ArrayList(1); + osd.add(new ServiceUUID("http://"+addr.getHostName()+":"+addr.getPort())); + locations.add(new Location(sp,osd)); + } + loc = new Locations(locations); + System.out.println("locations: "+loc.asJSONString()); + } + + public void testWriteRead(int numObjects, byte pattern) throws Exception { + + RPCResponse r = client.truncate(osds.get(0),loc,cap,FILE_ID,0); + r.waitForResponse(); + r.freeBuffers(); + + for (int obj = 0; obj < numObjects; obj++) { + ReusableBuffer buf = BufferPool.allocate(STRIPE_SIZE*KB); + for (int i = 0; i < STRIPE_SIZE*KB; i++) { + buf.put(pattern); + } + + RPCResponse response = client.put(osds.get(0),loc,cap,FILE_ID,obj,buf); + response.waitForResponse(); + response.freeBuffers(); + } + Logging.logMessage(Logging.LEVEL_INFO,this,"data writen"); + + Thread.sleep(WAIT_FOR_LEASE_RETURN); + + Logging.logMessage(Logging.LEVEL_INFO,this,"start reading"); + for (int obj = 0; obj < numObjects; obj++) { + RPCResponse 
response = client.get(osds.get(1),loc,cap,FILE_ID,obj); + ReusableBuffer data = response.getBody(); + data.position(0); + for (int i = 0; i < STRIPE_SIZE*KB; i++) { + if (data.get() != pattern) { + throw new IllegalArgumentException("invalid data"); + } + } + + response.freeBuffers(); + } + Logging.logMessage(Logging.LEVEL_INFO,this,"testWriteRead successful!"); + + } + + public void testInterleavingWrite(int numObjects) throws Exception { + + RPCResponse r = client.truncate(osds.get(0),loc,cap,FILE_ID,0); + r.waitForResponse(); + r.freeBuffers(); + + if (numObjects%2 != 0) + throw new IllegalArgumentException("numObjects must be an even integer"); + + + for (int obj = 0; obj < numObjects; obj += 2) { + + ReusableBuffer bufA = BufferPool.allocate(STRIPE_SIZE*KB); + for (int i = 0; i < STRIPE_SIZE*KB; i++) { + bufA.put((byte)'a'); + } + + ReusableBuffer bufB = BufferPool.allocate(STRIPE_SIZE*KB); + for (int i = 0; i < STRIPE_SIZE*KB; i++) { + bufB.put((byte)'b'); + } + bufB.position(0); + bufA.position(0); + RPCResponse responseA = client.put(osds.get(0),loc,cap,FILE_ID,obj,bufA); + Logging.logMessage(Logging.LEVEL_DEBUG,this,"wrote a to "+obj); + RPCResponse responseB = client.put(osds.get(1),loc,cap,FILE_ID,obj+1,bufB); + Logging.logMessage(Logging.LEVEL_DEBUG,this,"wrote b to "+(obj+1)); + responseA.waitForResponse(); + responseB.waitForResponse(); + responseA.freeBuffers(); + responseB.freeBuffers(); + } + + Thread.sleep(WAIT_FOR_LEASE_RETURN); + + + for (int obj = 0; obj < numObjects; obj += 2) { + RPCResponse responseA = client.get(osds.get(1),loc,cap,FILE_ID,obj); + RPCResponse responseB = client.get(osds.get(0),loc,cap,FILE_ID,obj+1); + ReusableBuffer dataA = responseA.getBody(); + ReusableBuffer dataB = responseB.getBody(); + + dataA.position(0); + dataB.position(0); + + for (int i = 0; i < STRIPE_SIZE*KB; i++) { + byte tmp = dataA.get(); + if (tmp != (byte)'a') { + throw new IllegalArgumentException("invalid data: "+tmp+"/"+((char)tmp)); + } + tmp = 
dataB.get(); + if (tmp != (byte)'b') { + throw new IllegalArgumentException("invalid data: "+tmp+"/"+((char)tmp)); + } + } + + responseA.freeBuffers(); + responseB.freeBuffers(); + } + Logging.logMessage(Logging.LEVEL_INFO,this,"interleaving test successful!"); + + } + + + public void testRoundRobin(int numRounds) throws Exception { + + final int numOsds = osds.size(); + + RPCResponse r = client.truncate(osds.get(0),loc,cap,FILE_ID,0); + r.waitForResponse(); + r.freeBuffers(); + + for (int obj = 0; obj < numRounds; obj++) { + + for (int j = 0; j < numOsds; j++) { + ReusableBuffer bufA = BufferPool.allocate(STRIPE_SIZE*KB); + for (int i = 0; i < STRIPE_SIZE*KB; i++) { + bufA.put((byte)('a'+j)); + } + + bufA.position(0); + RPCResponse responseA = client.put(osds.get(j),loc,cap,FILE_ID,obj*numOsds+j,bufA); + Logging.logMessage(Logging.LEVEL_DEBUG,this,"wrote "+((char)('a'+j))+" to "+osds.get(j)); + responseA.waitForResponse(); + responseA.freeBuffers(); + } + } + + //Thread.sleep(10001); + Thread.sleep(WAIT_FOR_LEASE_RETURN/2); + + for (int obj = 0; obj < numRounds; obj++) { + + for (int j = 0; j < numOsds; j++) { + RPCResponse responseA = client.get(osds.get(numOsds-j-1),loc,cap,FILE_ID,obj*numOsds+j); + ReusableBuffer dataA = responseA.getBody(); + Logging.logMessage(Logging.LEVEL_DEBUG,this,"read from "+osds.get(numOsds-j-1)); + dataA.position(0); + for (int i = 0; i < STRIPE_SIZE*KB; i++) { + byte tmp = dataA.get(); + if (tmp != (byte)('a'+j)) { + throw new IllegalArgumentException("invalid data: "+(char)tmp+"/"+((char)('a'+j))); + } + } + + responseA.freeBuffers(); + } + } + + Logging.logMessage(Logging.LEVEL_INFO,this,"roundrobin test successful!"); + + } + + public void shutdown() { + client.shutdown(); + client.waitForShutdown(); + } + + /** + * @param args the command line arguments + */ + public static void main(String[] args) { + try { + + Logging.start(Logging.LEVEL_INFO); + + List osds = new ArrayList(2); + osds.add(new 
InetSocketAddress("xtreem.zib.de",32640)); + //osds.add(new InetSocketAddress("farnsworth.zib.de",32641)); + osds.add(new InetSocketAddress("pub2-s.ane.cmc.osaka-u.ac.jp",32641)); + osds.add(new InetSocketAddress("planetlab5.flux.utah.edu",32641)); + + ReplicatedOSDTest test = new ReplicatedOSDTest(osds); + + Thread.sleep(100); + + //test.testInterleavingWrite(100); + test.testRoundRobin(50); + + System.exit(0); + } catch (Exception ex) { + ex.printStackTrace(); + System.exit(1); + } + + } + +} diff --git a/servers/src/org/xtreemfs/sandbox/tests/TortureXtreemFS.java b/servers/src/org/xtreemfs/sandbox/tests/TortureXtreemFS.java new file mode 100644 index 0000000000000000000000000000000000000000..e33ef2e7c3b833e3a5714f9dfa97d30ce203d2b1 --- /dev/null +++ b/servers/src/org/xtreemfs/sandbox/tests/TortureXtreemFS.java @@ -0,0 +1,141 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Björn Kolbeck (ZIB) + */ + + +package org.xtreemfs.sandbox.tests; + +import java.net.InetSocketAddress; +import java.net.URL; +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.clients.io.RandomAccessFile; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.uuids.UUIDResolver; +import org.xtreemfs.foundation.speedy.MultiSpeedy; + +/** + * + * @author bjko + */ +public class TortureXtreemFS { + + public static void main(String[] args) { + MultiSpeedy speedy = null; + try { + Logging.start(Logging.LEVEL_WARN); + TimeSync.initialize(null, 10000, 50, ""); + final String mrcURL = ((args.length >= 1) ? args[0] : "http://localhost:32636"); + + URL url = new URL(mrcURL); + + final String path = ((args.length >= 2) ? args[1] : "test")+"/torture.data"; + + final String dirAddr = ((args.length >= 3) ? args[2] : "http://localhost:32638"); + final URL dirURL = new URL(dirAddr); + + DIRClient dir = new DIRClient(null,new InetSocketAddress(dirURL.getHost(),dirURL.getPort())); + UUIDResolver.start(dir, 10000, 9999999); + System.out.println("file size from 64k to 512MB with record length from 4k to 1M"); + + final int MIN_FS = 64*1024; + final int MAX_FS = 512*1024*1024; + + final int MIN_REC = 4*1024; + final int MAX_REC = 1024*1024; + + speedy = new MultiSpeedy(); + speedy.start(); + + RandomAccessFile tmp = new RandomAccessFile("cw",url,path+".tmp",speedy); + System.out.println("Default striping policy is: "+tmp.getStripingPolicy()); + + for (int fsize = MIN_FS; fsize <= MAX_FS; fsize = fsize * 2) { + for (int recsize = MIN_REC; recsize <= MAX_REC; recsize = recsize *2) { + final int numRecs = fsize/recsize; + if (numRecs == 0) + continue; + byte[] sendBuffer = new byte[recsize]; + for (int i = 0; i < recsize; i++) { + sendBuffer[i] = (byte)((i%26) + 65); + } + + long tStart = System.currentTimeMillis(); + RandomAccessFile raf = new RandomAccessFile("cw",url,path,speedy); 
+ long tOpen = System.currentTimeMillis(); + + long bytesWritten = 0; + //do writes + for (int rec = 0; rec < numRecs;rec++) { + bytesWritten += raf.write(sendBuffer, 0, recsize); + } + final long tWrite = System.currentTimeMillis(); + assert(bytesWritten == numRecs*recsize); + + raf.flush(); + raf.seek(0); + final long tFlush = System.currentTimeMillis(); + + //do writes + byte[] readBuffer = new byte[recsize]; + for (int rec = 0; rec < numRecs;rec++) { + raf.read(readBuffer, 0, recsize); + for (int i = 0; i < recsize; i++) { + if (readBuffer[i] != (byte)((i%26) + 65)) { + System.out.println("INVALID CONTENT AT "+(rec*recsize+i)); + System.out.println("expected: "+(byte)((i%26) + 65)); + System.out.println("got : "+readBuffer[i]); + System.exit(1); + } + } + } + final long tRead = System.currentTimeMillis(); + + raf.delete(); + + final long tDelete = System.currentTimeMillis(); + + double writeRate = ( (double) fsize) / 1024.0 / ( ((double)(tWrite-tOpen+1)) / 1000.0 ); + + double readRate = ( (double) fsize) / 1024.0 / ( ((double)(tRead-tFlush+1)) / 1000.0 ); + + System.out.format("fs: %8d bs: %8d write: %6d ms %6.0f kb/s read: %6d ms %6.0f kb/s\n", + fsize/1024,recsize,(tWrite-tOpen),writeRate,(tRead-tFlush), + readRate); + + + } + } + + System.out.println("finished"); + speedy.shutdown(); + dir.shutdown(); + UUIDResolver.shutdown(); + } catch (Exception ex) { + ex.printStackTrace(); + System.exit(1); + } + + } + +} diff --git a/servers/src/org/xtreemfs/utils/CLIParser.java b/servers/src/org/xtreemfs/utils/CLIParser.java new file mode 100644 index 0000000000000000000000000000000000000000..5edb61a63dca05fbaa4c9fc58f8968d421ada2ff --- /dev/null +++ b/servers/src/org/xtreemfs/utils/CLIParser.java @@ -0,0 +1,114 @@ +package org.xtreemfs.utils; + +import java.io.File; +import java.net.URL; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +public class CLIParser { + + public static final class CliOption { + public 
enum OPTIONTYPE { + NUMBER, STRING, SWITCH, URL, FILE + }; + + public final OPTIONTYPE optType; + + public Boolean switchValue; + + public String stringValue; + + public Long numValue; + + public URL urlValue; + + public File fileValue; + + public CliOption(OPTIONTYPE oType) { + this.optType = oType; + } + } + + public static void parseCLI(String[] args, Map options, + List arguments) throws IllegalArgumentException { + List argList = Arrays.asList(args); + + Iterator iter = argList.iterator(); + while (iter.hasNext()) { + final String arg = iter.next().trim(); + if (arg.startsWith("-")) { + // option + final String optName = arg.substring(1); + final CliOption option = options.get(optName); + if (option == null) { + throw new IllegalArgumentException(arg + " is not a valid option"); + } + switch (option.optType) { + case SWITCH: { + option.switchValue = true; + break; + } + case STRING: { + if (iter.hasNext()) { + final String value = iter.next(); + option.stringValue = value.trim(); + } else { + throw new IllegalArgumentException(arg + " requires a string argument"); + } + break; + } + case NUMBER: { + if (iter.hasNext()) { + final String value = iter.next(); + try { + option.numValue = Long.valueOf(value.trim()); + } catch (NumberFormatException ex) { + throw new IllegalArgumentException(arg + + " requires a integer argument and " + value + + " is not an integer"); + } + } else { + throw new IllegalArgumentException(arg + " requires a string argument"); + } + break; + } + case URL: { + if (iter.hasNext()) { + final String value = iter.next(); + try { + final URL tmp = new URL(value); + option.urlValue = tmp; + } catch (Exception ex) { + throw new IllegalArgumentException(arg + " requires :"); + } + } else { + throw new IllegalArgumentException(arg + " requires a string argument"); + } + break; + } + + case FILE: { + if (iter.hasNext()) { + final String value = iter.next(); + try { + final File tmp = new File(value); + option.fileValue = tmp; + } catch (Exception 
ex) { + throw new IllegalArgumentException(arg + " requires ://:"); + } + } else { + throw new IllegalArgumentException(arg + " requires a string argument"); + } + break; + } + + } + } else { + arguments.add(arg); + } + } + } + +} diff --git a/servers/src/org/xtreemfs/utils/DefaultDirConfig.java b/servers/src/org/xtreemfs/utils/DefaultDirConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..0583a9309cb107537c812469a7a1bdff134a5cf5 --- /dev/null +++ b/servers/src/org/xtreemfs/utils/DefaultDirConfig.java @@ -0,0 +1,115 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Christian Lorenz (ZIB), Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.utils; + +import org.xtreemfs.common.config.*; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Properties; + +public class DefaultDirConfig extends Config { + + protected InetSocketAddress directoryService; + + private boolean sslEnabled; + private String serviceCredsFile; + + private String serviceCredsPassphrase; + + private String serviceCredsContainer; + + private String trustedCertsFile; + + private String trustedCertsPassphrase; + + private String trustedCertsContainer; + + public DefaultDirConfig() { + super(); + } + + public DefaultDirConfig(Properties prop) { + super(prop); + } + + public DefaultDirConfig(String filename) throws IOException { + super(filename); + } + + public void read() throws IOException { + + this.directoryService = this.readRequiredInetAddr("dir_service.host", "dir_service.port"); + + this.sslEnabled = readOptionalBoolean("ssl.enabled", false); + + if(isSslEnabled()){ + this.serviceCredsFile = this.readRequiredString("ssl.service_creds"); + + this.serviceCredsPassphrase = this.readRequiredString("ssl.service_creds.pw"); + + this.serviceCredsContainer = this.readRequiredString("ssl.service_creds.container"); + + this.trustedCertsFile = this.readRequiredString("ssl.trusted_certs"); + + this.trustedCertsPassphrase = this.readRequiredString("ssl.trusted_certs.pw"); + + this.trustedCertsContainer = this.readRequiredString("ssl.trusted_certs.container"); + } + + } + + public InetSocketAddress getDirectoryService() { + return directoryService; + } + + public boolean isSslEnabled() { + return sslEnabled; + } + + public String getServiceCredsFile() { + return serviceCredsFile; + } + + public String getServiceCredsPassphrase() { + return serviceCredsPassphrase; + } + + public String getServiceCredsContainer() { + return serviceCredsContainer; + } + + public String getTrustedCertsFile() { + return 
trustedCertsFile; + } + + public String getTrustedCertsPassphrase() { + return trustedCertsPassphrase; + } + + public String getTrustedCertsContainer() { + return trustedCertsContainer; + } + +} diff --git a/servers/src/org/xtreemfs/utils/cleanup_osd.java b/servers/src/org/xtreemfs/utils/cleanup_osd.java new file mode 100644 index 0000000000000000000000000000000000000000..9eee5eb00c3bb23d7cdf77c4982564a1f65699e2 --- /dev/null +++ b/servers/src/org/xtreemfs/utils/cleanup_osd.java @@ -0,0 +1,379 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Felix Langner (ZIB) + */ +package org.xtreemfs.utils; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.InetSocketAddress; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.HttpErrorException; +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.clients.mrc.MRCClient; +import org.xtreemfs.common.clients.osd.ConcurrentFileMap; +import org.xtreemfs.common.clients.osd.OSDClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.common.uuids.UUIDResolver; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.pinky.SSLOptions; +import org.xtreemfs.utils.CLIParser.CliOption; + +/** + *

OSD service function for the console. + * Cleans an OSD up, by eliminating zombie files. + * Supports SSL connection.

+ * + * @author langner + * + */ + +public class cleanup_osd { + private static final String DEFAULT_DIR_CONFIG = "/etc/xos/xtreemfs/default_dir"; + private static final String DEFAULT_RESTORE_PATH = "lost+found"; + + private static BufferedReader answers = new BufferedReader(new InputStreamReader(System.in)); + + private static DIRClient dirClient; + + private static MRCClient mrcClient; + + private static OSDClient osdClient; + // generate authString + private static String authString; + static { + try { + authString = NullAuthProvider.createAuthString("root", MRCClient + .generateStringList("root")); + } catch (JSONException e) { + e.printStackTrace(); + } + } + + public static void main(String[] args) throws Exception{ + Logging.start(Logging.LEVEL_WARN); + + // parse the call arguments + Map options = new HashMap(); + List arguments = new ArrayList(1); + options.put("h", new CliOption(CliOption.OPTIONTYPE.SWITCH)); + options.put("v", new CliOption(CliOption.OPTIONTYPE.SWITCH)); + options.put("r", new CliOption(CliOption.OPTIONTYPE.SWITCH)); + options.put("e", new CliOption(CliOption.OPTIONTYPE.SWITCH)); + options.put("d", new CliOption(CliOption.OPTIONTYPE.URL)); + + CLIParser.parseCLI(args, options, arguments); + + if (arguments.size() != 1 || options.get("h").switchValue != null) { + usage(); + return; + } + + InetSocketAddress osdAddr = null; + boolean useSSL = false; + String serviceCredsFile = null; + String serviceCredsPass = null; + String trustedCAsFile = null; + String trustedCAsPass = null; + SSLOptions sslOptions = null; + URL dirURL = options.get("d").urlValue; + boolean verbose = options.get("v").switchValue != null; + boolean erase = options.get("e").switchValue != null; + boolean restore = options.get("r").switchValue != null; + + // read default settings for the DIR + if (dirURL == null) { + DefaultDirConfig cfg = new DefaultDirConfig(DEFAULT_DIR_CONFIG); + cfg.read(); + + // load SSL options + useSSL = cfg.isSslEnabled(); + serviceCredsFile = 
cfg.getServiceCredsFile(); + serviceCredsPass = cfg.getServiceCredsPassphrase(); + trustedCAsFile = cfg.getTrustedCertsFile(); + trustedCAsPass = cfg.getTrustedCertsPassphrase(); + sslOptions = useSSL ? new SSLOptions(serviceCredsFile, serviceCredsPass, + trustedCAsFile, trustedCAsPass) : null; + + dirClient = new DIRClient(cfg.getDirectoryService(), sslOptions, RPCClient.DEFAULT_TIMEOUT); + } else + dirClient = new DIRClient(new InetSocketAddress(dirURL.getHost(), dirURL.getPort()), null, RPCClient.DEFAULT_TIMEOUT); + + // read default settings for the OSD + String osdUUID = null; + String address = arguments.get(0); + boolean isUUID = false; + if (address.startsWith("uuid:")) { + address = address.substring("uuid:".length()); + isUUID = true; + } + + + + // resolve UUID if necessary + if (!isUUID){ + /* + URL osdURL; + try{ + osdURL = new URL(address); + }catch (MalformedURLException mue){ + System.out.println("The given address could not be resolved!"); + return; + } + osdAddr = new InetSocketAddress(osdURL.getHost(), osdURL.getPort()); + */ + + dirClient.shutdown(); + usage(); + }else{ + TimeSync.initialize(dirClient, 60000, 60000, authString); + if(!UUIDResolver.isRunning()){ + UUIDResolver.start(dirClient, 1000, 1000); + } + ServiceUUID service = new ServiceUUID(address); + service.resolve(); + osdAddr = service.getAddress(); + UUIDResolver.shutdown(); + TimeSync.getInstance().shutdown(); + + osdUUID = address; + } + + // start cleanUp process + osdClient = new OSDClient(600000,sslOptions); + RPCResponse response = null; + ConcurrentFileMap fileList = null; + System.out.println("The OSD will now be checked for 'zombie' files. 
\n" + + "Depending on the speed of that OSD this check can take a few minutes...\n"); + try{ + Map>> rsp = null; + + response = osdClient.cleanUp(osdAddr,authString); + + if (response == null || (rsp = (Map>>) response.get()) == null){ + osdClient.shutdown(); + System.out.println("This OSD is clean."); + System.exit(0); + } + fileList = new ConcurrentFileMap(rsp); + }catch (NumberFormatException nfe){ + osdClient.shutdown(); + usage(); + }catch (IllegalArgumentException ia){ + osdClient.shutdown(); + usage(); + }catch (Exception e){ + System.out.println("Checking the OSD was not successful. Cause: "+e.getLocalizedMessage()); + osdClient.shutdown(); + System.exit(1); + }finally{ + if (response!=null) + response.freeBuffers(); + } + + String empty1 = " |"; + String empty = " |"; + String question; + Long fileSize; + String filePreview; + + // user interaction + long totalZombiesSize = 0L; + for (List volume : fileList.keySetList()){ + for (String file : fileList.getFileIDSet(volume)){ + totalZombiesSize += fileList.getFileSize(volume, file); + } + } + + //validate(fileList); + + if (fileList != null && fileList.size()!=0){ + if (fileList.size()==1) System.out.println("There is one zombie on that OSD."); + else System.out.println("There are '"+fileList.size()+"' zombies with a total size of "+totalZombiesSize+" bytes on that OSD. "); + question = ("Do you want to list "+(fileList.size()==1 ? "it":"them")+"? [y/n]"); + verbose = (verbose) ? true : !requestUserDecision(question); + if (!verbose){ + System.out.println("VolumeID:FileNumber |File size in byte |Preview"); + } + for (List volume : fileList.keySetList()){ + for (String file : fileList.getFileIDSet(volume)){ + Long fileNumber = Long.valueOf(file.substring(file.indexOf(":")+1, file.length())); + + // get the file details + fileSize = fileList.getFileSize(volume,file); + filePreview = fileList.getFilePreview(volume, file); + + if (!verbose){ + String f = file+(volume.get(0).equals("unknown") ? 
"(unknown)" : ""); + String out = f+empty1.substring(f.length(),empty1.length())+ + empty.substring(0, empty.length()-(fileSize.toString().length()+2))+ + fileSize+" |"+filePreview; + + System.out.println(out); + } + if (!volume.get(0).equals("unknown") && !erase){ + question = ("Do you want to restore File: '"+file+"'? Otherwise it will be permanently deleted. [y/n]"); + if ((restore) ? true : requestUserDecision(question)){ + if (mrcClient==null) mrcClient = new MRCClient(); + try{ + mrcClient.restoreFile( + new InetSocketAddress(volume.get(1), + Integer.parseInt(volume.get(2))), + DEFAULT_RESTORE_PATH, fileNumber, + fileList.getFileSize(volume, file),null, + authString,osdUUID,fileList.getObjectSize(volume,file),volume.get(0)); + }catch (HttpErrorException he){ + System.out.println(file+" could not be restored properly. Cause: "+he.getMessage()); + } + }else{ + if ((erase) ? true : requestUserDecision("Do you really want to delete that file? [y/n]")){ + response = osdClient.cleanUpDelete(osdAddr,authString,file); + try{ + response.waitForResponse(1000); + }catch (HttpErrorException he){ + System.out.println(file+" could not be deleted properly. Cause: "+he.getMessage()); + } + if (response!=null) response.freeBuffers(); + } + } + }else{ + if((erase) ? true : requestUserDecision("Do you really want to delete that file? [y/n]")){ + response = osdClient.cleanUpDelete(osdAddr,authString,file); + try{ + response.waitForResponse(1000); + }catch (HttpErrorException he){ + System.out.println(file+" could not be deleted properly. 
Cause: "+he.getMessage()); + } + if (response!=null) response.freeBuffers(); + } + } + } + } + }else + System.out.println("\n There are no zombies on that OSD."); + + // operation finished + osdClient.shutdown(); + if (mrcClient!=null) mrcClient.shutdown(); + System.out.println("done."); + System.exit(0); + } + + private static void usage(){ + System.out.println("usage: xtfs_cleanup [options] uuid:\n"); + System.out.println(" -h show usage info"); + System.out.println(" -v verbose"); + System.out.println(" -r restore all potential zombies"); + System.out.println(" -e !erase all potential zombies permanently!"); + System.out.println(" -d directory service to use (e.g. 'http://localhost:32638')"); + System.out.println("If no DIR URI is specified, URI and security settings are taken from '/etc/xos/xtreemfs/default_dir'"); + System.exit(1); + } + + private static boolean requestUserDecision(String question){ + System.out.println(question); + String answer; + try { + answer = answers.readLine(); + assert(answer!=null && answer.length()>0); + } catch (IOException e) { + System.out.println("Answer could not be read due an IO Exception."); + return false; + } + return (answer.charAt(0) == 'y' || answer.charAt(0) == 'Y'); + } + /* + @Deprecated + private static void validate(ConcurrentFileMap fm) throws IOException, ClassNotFoundException{ + String path = "/home/flangner/temp/database/"; + String volID1 = "0004760EDB9818CA9248215D00000001"; + String volID2 = "0004760EDB982F2A024949CA00000001"; + String volID3 = "0004760EDB984AECE148237D00000001"; + String volID4 = "0004760EDB9859CFDC4884D000000001"; + String volID5 = "0004760EDB986FD49148F20E00000001"; + String volID6 = "0004760EDB989891BD4774E200000001"; + String volID7 = "0004760EDB98B11FA0482ECD00000001"; + String volID8 = "0004760EDB98CDCDDC485C4600000001"; + String fileName = "/mrcdb.1"; + File f = null; + + for (List volume : fm.keySetList()){ + if(volume.get(0).equals(volID1.substring(0,volID1.length()-8))){ + f = 
new File(path+volID1+fileName); + }else if(volume.get(0).equals(volID2.substring(0,volID2.length()-8))){ + f = new File(path+volID2+fileName); + }else if(volume.get(0).equals(volID3.substring(0,volID3.length()-8))){ + f = new File(path+volID3+fileName); + }else if(volume.get(0).equals(volID4.substring(0,volID4.length()-8))){ + f = new File(path+volID4+fileName); + }else if(volume.get(0).equals(volID5.substring(0,volID5.length()-8))){ + f = new File(path+volID5+fileName); + }else if(volume.get(0).equals(volID6.substring(0,volID6.length()-8))){ + f = new File(path+volID6+fileName); + }else if(volume.get(0).equals(volID7.substring(0,volID7.length()-8))){ + f = new File(path+volID7+fileName); + }else if(volume.get(0).equals(volID8.substring(0,volID8.length()-8))){ + f = new File(path+volID8+fileName); + }else if(volume.get(0).equals("unknown")){ + continue; + }else{ + System.out.println("ERROR: Volume not found! Available VolIds are: "); + System.out.println(volID1.substring(0,volID1.length()-8)); + System.out.println(volID2.substring(0,volID2.length()-8)); + System.out.println(volID3.substring(0,volID3.length()-8)); + System.out.println(volID4.substring(0,volID4.length()-8)); + System.out.println(volID5.substring(0,volID5.length()-8)); + System.out.println(volID6.substring(0,volID6.length()-8)); + System.out.println(volID7.substring(0,volID7.length()-8)); + System.out.println(volID8.substring(0,volID8.length()-8)); + + System.out.println("But requested was: "+volume.get(0)); + break; + } + FileInputStream fis = new FileInputStream(f); + ObjectInputStream ois = new ObjectInputStream(fis); + Map fileMap = (TreeMap) ois.readObject(); + Set fileNumbers = fileMap.keySet(); + + for (String file : fm.getFileNumberSet(volume)){ + if (fileNumbers.contains(Long.valueOf(file))){ + System.out.println("ERROR: "+volume.get(0)+":"+file+" is no zombie!"); + } + } + + ois.close(); + fis.close(); + } + } + */ +} diff --git a/servers/src/org/xtreemfs/utils/locate_user_home.java 
b/servers/src/org/xtreemfs/utils/locate_user_home.java new file mode 100644 index 0000000000000000000000000000000000000000..d205affb9cac1c43844e35692caebbbf483d288a --- /dev/null +++ b/servers/src/org/xtreemfs/utils/locate_user_home.java @@ -0,0 +1,114 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. + */ + +package org.xtreemfs.utils; + +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.pinky.SSLOptions; +import org.xtreemfs.utils.CLIParser.CliOption; + +/** + * + * @author bjko + */ +public final class locate_user_home { + + public static final String HOMEDIR_PREFIX = "user-"; + + + public static void main(String[] args) { + try { + + Logging.start(Logging.LEVEL_ERROR); + + Map options = new HashMap(); + List arguments = new ArrayList(3); + options.put("d",new CliOption(CliOption.OPTIONTYPE.URL)); + + CLIParser.parseCLI(args, options, arguments); + + if (arguments.size() != 1) { + System.out.println("Usage: locate_user_home "); + System.out.println(" This utility retrieves the volume URL for a volume named"); + System.out.println(" '"+HOMEDIR_PREFIX+"' from the directory service"); + System.out.println("options: "); + System.out.println(" -d http://: specifies the URL to the directory service"); + System.out.println(" by default, the directory service set in /etc/xos/xtreemfs/default_dir is used"); + System.out.println(" The -d option does not work with SSL, use the config file instead."); + System.out.println(""); + System.out.println("This utility will return the first matching volume URL and an exit status 0 on success. 
"+ + "If no matching volume can be found an exit status of 2 is returned. If an error occurs (e.g. directory "+ + "service not available) an exit status of 1 is returned."); + System.out.println(""); + System.exit(1); + } + + URL dirUrl = options.get("d").urlValue; + + InetSocketAddress dirAddr = null; + SSLOptions sslopts = null; + + if (dirUrl == null) { + File defaultDir = new File("/etc/xos/xtreemfs/default_dir"); + if (!defaultDir.exists()) { + System.err.println("Cannot read Directory Service URL from "+defaultDir.getAbsolutePath()); + System.err.println("Please create file or specify with -d "); + System.exit(1); + } else { + DefaultDirConfig c = new DefaultDirConfig(defaultDir.getAbsolutePath()); + c.read(); + dirAddr = c.getDirectoryService(); + + if (c.isSslEnabled()) { + sslopts = new SSLOptions(c.getServiceCredsFile(), c.getServiceCredsPassphrase(), + c.getServiceCredsContainer(),c.getTrustedCertsFile(), c.getTrustedCertsPassphrase(), + c.getTrustedCertsContainer(),false); + } + } + } else { + dirAddr = new InetSocketAddress(dirUrl.getHost(),dirUrl.getPort()); + } + + DIRClient dir = null; + if (sslopts == null) + dir = new DIRClient(null, dirAddr,5000); + else + dir = new DIRClient(dirAddr,sslopts,5000); + + String volURL = dir.locateUserHome(arguments.get(0), NullAuthProvider.createAuthString("locate_user_home", "utils")); + dir.shutdown(); + + if (volURL == null) { + System.err.println("Home volume for user "+arguments.get(0)+" does not exist!"); + System.exit(2); + } else { + System.out.println(volURL); + } + + } catch (IOException ex) { + ex.printStackTrace(); + System.exit(1); + } catch (InterruptedException ex) { + ex.printStackTrace(); + System.exit(1); + } catch (Exception ex) { + ex.printStackTrace(); + System.exit(1); + } + } + +} diff --git a/servers/src/org/xtreemfs/utils/lookupdirurl.java b/servers/src/org/xtreemfs/utils/lookupdirurl.java new file mode 100644 index 
0000000000000000000000000000000000000000..81286ba7b2288ebf104f0bc8ac4ea8cc7903be0c --- /dev/null +++ b/servers/src/org/xtreemfs/utils/lookupdirurl.java @@ -0,0 +1,71 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. + */ + +package org.xtreemfs.utils; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.HashMap; +import java.util.Map; +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; + +/** + * + * @author bjko + */ +public class lookupdirurl { + + public static void main(String[] args) { + try { + + if (args.length != 3) { + System.out.println("usage: lookupdirurl \n"); + System.exit(1); + } + Logging.start(Logging.LEVEL_ERROR); + // remove leading and trailing quotes + if (args[2].charAt(0) == '"' && args[2].charAt(args[2].length() - 1) == '"') + args[2] = args[2].substring(1, args[2].length() - 1); + + // remove all backslashes with spaces + String volname = args[2].replaceAll("\\\\ ", " "); + + String diraddr = args[0]; + + int dirport = Integer.valueOf(args[1]); + + DIRClient dir = new DIRClient(null, new InetSocketAddress(diraddr,dirport)); + Map qry = new HashMap(); + qry.put("type","volume"); + qry.put("name",volname); + RPCResponse>> r = dir.getEntities(qry, null, NullAuthProvider.createAuthString("nobody", "lookupdirurl")); + Map> map = r.get(); + + if (map.size() == 0) + System.exit(1); + + for (String uuid : map.keySet()) { + Map data = map.get(uuid); + System.out.println(data.get("mrc")+"/"+data.get("name")); + break; + } + dir.shutdown(); + + } catch (IOException ex) { + ex.printStackTrace(); + System.exit(1); + } catch (InterruptedException ex) { + ex.printStackTrace(); + System.exit(1); + } catch (Exception ex) { + ex.printStackTrace(); + System.exit(1); + } + } + +} diff --git 
a/servers/src/org/xtreemfs/utils/mount_user_home.java b/servers/src/org/xtreemfs/utils/mount_user_home.java new file mode 100644 index 0000000000000000000000000000000000000000..3f6f89926745c3dcfbc23edef9d51fbc3ef39c50 --- /dev/null +++ b/servers/src/org/xtreemfs/utils/mount_user_home.java @@ -0,0 +1,116 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. + */ + +package org.xtreemfs.utils; + +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.utils.CLIParser.CliOption; + +/** + * + * @author bjko + */ +public final class mount_user_home { + + + + public static void main(String[] args) { + try { + + Logging.start(Logging.LEVEL_ERROR); + + Map options = new HashMap(); + List arguments = new ArrayList(3); + options.put("d",new CliOption(CliOption.OPTIONTYPE.URL)); + + CLIParser.parseCLI(args, options, arguments); + + if (arguments.size() != 1) { + System.out.println("Usage: locate_volume "); + System.out.println("options: "); + System.out.println(" -d http(s)://: specifies the URL to the directory service"); + System.out.println(" by default, the directory service set in /etc/xos/xtreemfs/default_dir is used"); + System.out.println(""); + System.exit(1); + } + + URL dirUrl = options.get("d").urlValue; + + InetSocketAddress dirAddr = null; + + if (dirUrl == null) { + File defaultDir = new File("/etc/xos/xtreemfs/default_dir"); + if (!defaultDir.exists()) { + System.err.println("Cannot read Directory Service URL from "+defaultDir.getAbsolutePath()); + System.err.println("Please create file or specify with -d "); + System.exit(1); + } else { + 
DefaultDirConfig c = new DefaultDirConfig(defaultDir.getAbsolutePath()); + dirAddr = c.getDirectoryService(); + } + } else { + dirAddr = new InetSocketAddress(dirUrl.getHost(),dirUrl.getPort()); + } + + DIRClient dir = new DIRClient(null, dirAddr); + Map qry = new HashMap(); + qry.put("type","volume"); + qry.put("name","user-"+arguments.get(0)); + RPCResponse>> r = dir.getEntities(qry, null, NullAuthProvider.createAuthString("nobody", "lookupdirurl")); + Map> map = r.get(); + + if (map.size() == 0) + System.exit(1); + + String volURL = null; + for (String uuid : map.keySet()) { + Map data = map.get(uuid); + volURL = data.get("mrc")+"/"+data.get("name"); + break; + } + dir.shutdown(); + + if (volURL == null) { + System.err.println("Home volume for user "+arguments.get(0)+" does not exist!"); + System.exit(1); + } else { + File gridhome = new File(System.getenv().get("HOME")+"/gridhome"); + if (!gridhome.exists()) { + gridhome.mkdirs(); + } + System.out.println("executing: "+"xtfs_mount -o volume_url="+volURL+" "+gridhome.getAbsolutePath()); + Process mount = Runtime.getRuntime().exec("xtfs_mount -o volume_url="+volURL+" "+gridhome.getAbsolutePath()); + mount.waitFor(); + if (mount.exitValue() != 0) { + System.err.println("cannot mount gridhome... exiting"); + System.exit(1); + } + } + + } catch (IOException ex) { + ex.printStackTrace(); + System.exit(1); + } catch (InterruptedException ex) { + ex.printStackTrace(); + System.exit(1); + } catch (Exception ex) { + ex.printStackTrace(); + System.exit(1); + } + } + +} diff --git a/servers/src/org/xtreemfs/utils/replicas.java b/servers/src/org/xtreemfs/utils/replicas.java new file mode 100644 index 0000000000000000000000000000000000000000..704103bcdf46cb283dff2834696a82f94ae78eb7 --- /dev/null +++ b/servers/src/org/xtreemfs/utils/replicas.java @@ -0,0 +1,165 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. 
XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.utils; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.mrc.MRCClient; +import org.xtreemfs.common.logging.Logging; + +/** + * + * @author bjko + */ +public class replicas { + + /** Creates a new instance of replicas */ + public replicas() { + } + + /** + * @param args + * the command line arguments + */ + public static void main(String[] args) { + Logging.start(Logging.LEVEL_ERROR); + // TODO code application logic here + if (args.length < 2) { + System.out + .println("Usage: replicas http://hostname:port/path command"); + System.exit(1); + } + + String url = args[0]; + + Pattern schema = Pattern + .compile("http://([a-zA-Z_0-9\\-\\.]+)(?:\\:(\\d+))??/(.+)"); + Matcher m = schema.matcher(url.trim()); + InetSocketAddress mrcAddr = null; + String path = ""; + if (m.matches()) { + mrcAddr = new InetSocketAddress(m.group(1), + 
(m.group(2) == null) ? 32636 : Integer.parseInt(m.group(2))); + path = m.group(3); + } else { + System.out.println(url + " is not a valid URL"); + System.exit(1); + } + + String cmd = args[1]; + + try { + + MRCClient client = new MRCClient(); + + Map data = client.stat(mrcAddr, path, true, false, + false, NullAuthProvider.createAuthString("bla", "bla")); + + if (!data.containsKey("replicas")) { + System.out.println("requested object does not exist!"); + System.exit(1); + } + + String fileId = (String) data.get("fileId"); + + if (cmd.equalsIgnoreCase("list")) { + List l = (List) data.get("replicas"); + System.out.println("current replica list for " + fileId + + ", version " + l.get(1) + ":"); + List replicas = (List) l.get(0); + for (int i = 0; i < replicas.size(); i++) { + System.out.println("\t" + (i + 1) + ": " + + replicas.get(i).get(0) + " on " + + replicas.get(i).get(1)); + } + } else if (cmd.equalsIgnoreCase("add")) { + if (args.length < 6) { + System.out + .println("add ... "); + System.out + .println("policy can be RAID0, width is the number of OSDs to use for striping, stripe size is the object size in KB"); + System.out.println(""); + System.exit(1); + } + String sPolicy = args[2].toUpperCase(); + if (!sPolicy.equals("RAID0")) { + System.out.println("unknown striping policy " + sPolicy); + System.exit(1); + } + + int sWidth = Integer.parseInt(args[3]); + + int sSize = Integer.parseInt(args[4]); + + if (args.length - 5 != sWidth) { + System.out.println("you must specify exactly width (" + + sWidth + ") OSDs!"); + System.exit(1); + } + Map policy = new TreeMap(); + policy.put("policy", sPolicy); + policy.put("width", sWidth); + policy.put("stripe-size", sSize); + + List osds = new ArrayList(20); + for (int i = 5; i < args.length; i++) { + osds.add(args[i]); + } + client.addReplica(mrcAddr, fileId, policy, osds, + NullAuthProvider.createAuthString("bla", "bla")); + System.out.println("replica added"); + } else if (cmd.equalsIgnoreCase("rem")) { + if 
(args.length < 3) { + System.out.println("remove "); + System.exit(1); + } + List l = (List) ((List) data.get("replicas")).get(0); + List repl = (List) l.get(Integer.parseInt(args[2]) - 1); + Map sp = (Map) repl.get(0); + List locs = (List) repl.get(1); + + client.removeReplica(mrcAddr, fileId, sp, locs, + NullAuthProvider.createAuthString("bla", "bla")); + System.out.println("replica removed"); + } + + client.shutdown(); + + } catch (Exception ex) { + System.out.println("cannot complete operation: " + ex); + ex.printStackTrace(); + System.exit(1); + } + + } + +} diff --git a/servers/src/org/xtreemfs/utils/utils.java b/servers/src/org/xtreemfs/utils/utils.java new file mode 100644 index 0000000000000000000000000000000000000000..b1df16ab4a25d44311888d2f4ca70c608d10f5cc --- /dev/null +++ b/servers/src/org/xtreemfs/utils/utils.java @@ -0,0 +1,102 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.utils; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.HashMap; +import java.util.Map; +import java.util.StringTokenizer; + +/** + * + * @author bjko + */ +public class utils { + + public static Map getxattrs(String filename) throws IOException, + InterruptedException { + + File f = new File(filename); + Process p = Runtime.getRuntime().exec( + new String[] { "getfattr", "-m", "xtreemfs.*", "-d", f.getAbsolutePath() }); + p.waitFor(); + if (p.exitValue() != 0) + return null; + + Map result = new HashMap(); + + BufferedReader br = new BufferedReader(new InputStreamReader(p.getInputStream())); + br.readLine(); // skip first line + + for (;;) { + String nextLine = br.readLine(); + if (nextLine == null) + break; + StringTokenizer st = new StringTokenizer(nextLine, "="); + if (!st.hasMoreElements()) + continue; + + String key = st.nextToken(); + String value = st.nextToken(); + + // remove leading and trailing quotes + value = value.substring(1, value.length() - 1); + + result.put(key, value); + } + + return result; + } + + public static String getxattr(String filename, String attrname) throws IOException, + InterruptedException { + + File f = new File(filename); + Process p = Runtime.getRuntime().exec( + new String[] { "getfattr", "--only-values", "-n", attrname, f.getAbsolutePath() }); + p.waitFor(); + if (p.exitValue() != 0) + return null; + + BufferedReader br = new BufferedReader(new InputStreamReader(p.getInputStream())); + String target = br.readLine(); + + return target; + } + + public static void setxattr(String filename, String attrname, String attrvalue) + throws IOException, InterruptedException { + + File f = new File(filename); + Process p = Runtime.getRuntime().exec( + new String[] { "setfattr", "-n", attrname, "-v", attrvalue, f.getAbsolutePath() }); + p.waitFor(); + if 
(p.exitValue() != 0) + throw new IOException("a problem occurred when setting '" + attrname + "'"); + } +} diff --git a/servers/src/org/xtreemfs/utils/xtfs_mrcdbtool.java b/servers/src/org/xtreemfs/utils/xtfs_mrcdbtool.java new file mode 100644 index 0000000000000000000000000000000000000000..3a94025f8bb30de072ee19f568e8b7077a6c1d2f --- /dev/null +++ b/servers/src/org/xtreemfs/utils/xtfs_mrcdbtool.java @@ -0,0 +1,158 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.utils; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.pinky.SSLOptions; +import org.xtreemfs.utils.CLIParser.CliOption; + +public class xtfs_mrcdbtool { + + /** + * @param args + */ + public static void main(String[] args) { + + Logging.start(Logging.LEVEL_ERROR); + + String authString = null; + try { + authString = NullAuthProvider.createAuthString(System.getProperty("user.name"), System + .getProperty("user.name")); + } catch (JSONException e) { + e.printStackTrace(); + System.exit(1); + } + + Map options = new HashMap(); + List arguments = new ArrayList(3); + options.put("mrc", new CliOption(CliOption.OPTIONTYPE.URL)); + options.put("c", new CliOption(CliOption.OPTIONTYPE.STRING)); + options.put("cp", new CliOption(CliOption.OPTIONTYPE.STRING)); + options.put("t", new CliOption(CliOption.OPTIONTYPE.STRING)); + options.put("tp", new CliOption(CliOption.OPTIONTYPE.STRING)); + options.put("h", new CliOption(CliOption.OPTIONTYPE.SWITCH)); + + try { + CLIParser.parseCLI(args, options, arguments); + } catch (Exception exc) { + System.out.println(exc); + usage(); + return; + } + + CliOption h = options.get("h"); + if(h.switchValue != null) { + usage(); + return; + } + + CliOption mrc = options.get("mrc"); + if (mrc.urlValue == null) { + System.out.println("missing MRC URL"); + usage(); + return; + } + + if (arguments.size() != 2) { + usage(); + return; + } + + String op = arguments.get(0); + if (!"dump".equals(op) && !"restore".equals(op)) { + System.out.println("invalid operation: " + op); + usage(); + return; + } 
+ + String dumpFile = arguments.get(1); + + CliOption c = options.get("c"); + CliOption cp = options.get("cp"); + CliOption t = options.get("t"); + CliOption tp = options.get("tp"); + + String host = mrc.urlValue.getHost(); + int port = mrc.urlValue.getPort(); + String protocol = mrc.urlValue.getProtocol(); + + RPCClient client = null; + + try { + client = protocol.startsWith("https") ? new RPCClient(0, new SSLOptions(c.stringValue, + cp.stringValue, t.stringValue, tp.stringValue)) : new RPCClient(0, null); + client.setTimeout(0); + + if (op.equals("dump")) { + RPCResponse r = null; + try { + r = client.sendRPC(new InetSocketAddress(host, port), ".dumpdb", RPCClient + .generateList(dumpFile), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + else if (op.equals("restore")) { + RPCResponse r = null; + try { + r = client.sendRPC(new InetSocketAddress(host, port), ".restoredb", RPCClient + .generateList(dumpFile), authString, null); + r.waitForResponse(); + } finally { + r.freeBuffers(); + } + } + + else + usage(); + + } catch (Exception exc) { + exc.printStackTrace(); + } finally { + if (client != null) + client.shutdown(); + } + } + + private static void usage() { + System.out + .println("usage: xtfs_mrcdbtool -mrc [-c ] [-cp ] [-t ] [-tp ] dump|restore "); + System.exit(1); + } + +} diff --git a/servers/src/org/xtreemfs/utils/xtfs_stat.java b/servers/src/org/xtreemfs/utils/xtfs_stat.java new file mode 100644 index 0000000000000000000000000000000000000000..a47b47326160722fffd24fe5962035e3bc7b2023 --- /dev/null +++ b/servers/src/org/xtreemfs/utils/xtfs_stat.java @@ -0,0 +1,203 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB) + */ + +package org.xtreemfs.utils; + +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import org.xtreemfs.common.util.OutputUtils; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; + +/** + * + * @author bjko + */ +public class xtfs_stat { + + private static final int FILE = 1; + + private static final int DIR = 2; + + private static final int SYMLINK = 3; + + /** + * @param args + * the command line arguments + */ + public static void main(String[] args) { + try { + + if (args.length < 1) { + System.out.println("usage: " + xtfs_stat.class.getSimpleName() + " \n"); + System.exit(1); + } + + String fileName = ""; + for (String arg : args) + fileName += arg + " "; + + // remove leading and trailing spaces + fileName = fileName.trim(); + + // remove leading and trailing quotes + if (fileName.charAt(0) == '"' && fileName.charAt(fileName.length() - 1) == '"') + fileName = fileName.substring(1, fileName.length() - 1); + + // replace all backslashes with spaces + fileName = fileName.replaceAll("\\\\ ", " "); + + final String format = "%-25s %s\n"; + + // fetch all XtreemFS-related extended attributes + Map attrs = utils.getxattrs(fileName); + if (attrs == null) { + System.err.println("file not found: " + fileName); + return; + } + + 
String url = attrs.get("xtreemfs.url"); + if (url == null) { + System.out.println("'" + fileName + + "' is probably not part of an XtreemFS volume (no MRC URL found)."); + return; + } + + // first, render all general XtreemFS attributes + System.out.format(format, "filename", new File(fileName).getName()); + System.out.format(format, "XtreemFS URI", url); + System.out.format(format, "XtreemFS fileID", attrs.get("xtreemfs.file_id")); + + int ftype = Integer.parseInt(attrs.get("xtreemfs.object_type")); + switch (ftype) { + case FILE: + System.out.format(format, "object type", "regular file"); + break; + case DIR: + System.out.format(format, "object type", "directory" + + (attrs.get("xtreemfs.file_id").endsWith(":1") ? " (volume root)" : "")); + break; + case SYMLINK: + System.out.format(format, "object type", "symlink"); + break; + } + System.out.format(format, "owner", attrs.get("xtreemfs.owner")); + System.out.format(format, "group", attrs.get("xtreemfs.group")); + + if (ftype == FILE) { + String readOnly = attrs.containsKey("xtreemfs.read_only") ? 
attrs + .get("xtreemfs.read_only") : String.valueOf(false); + System.out.format(format, "read-only", readOnly); + } + + // if the file refers to a directory, render directory attributes + if (ftype == DIR) { + String defSP = attrs.get("xtreemfs.default_sp"); + if (defSP == null) + defSP = "none"; + System.out.format(format, "default striping policy", defSP); + } + + // render other known XtreemFS attributes + if (attrs.containsKey("xtreemfs.ac_policy_id")) + System.out.format(format, "access control policy ID", attrs + .get("xtreemfs.ac_policy_id")); + + if (attrs.containsKey("xtreemfs.osdsel_policy_id")) + System.out.format(format, "OSD selection policy ID", attrs + .get("xtreemfs.osdsel_policy_id")); + + if (attrs.containsKey("xtreemfs.free_space")) + System.out.format(format, "free usable disk space", OutputUtils.formatBytes(Long + .valueOf(attrs.get("xtreemfs.free_space")))); + + // if the file does not refer to a directory, render the + // X-Locations list + if (ftype != DIR) { + + // because of escape characters, the X-Locations list needs to + // be parsed in two steps: first, parse the string, + // then, parse a list from the parsed string + String s = (String) JSONParser.parseJSON(new JSONString("\"" + + attrs.get("xtreemfs.locations") + "\"")); + List l = (List) JSONParser.parseJSON(new JSONString(s)); + + System.out.println("\nXtreemFS replica list"); + if (l == null) { + System.out.println(" This file does not have any replicas yet."); + } else { + System.out.format(format, " list version ", l.get(1)); + List replicas = (List) l.get(0); + for (int i = 0; i < replicas.size(); i++) { + final Map policy = (Map) replicas.get(i).get(0); + final String pStr = policy.get("policy") + "," + policy.get("stripe-size") + + "kb," + policy.get("width"); + System.out.format(format, " replica " + (i + 1) + " policy", pStr); + System.out.format(format, " replica " + (i + 1) + " OSDs", replicas + .get(i).get(1)); + } + } + + System.out.println(); + } + + } catch 
(IOException ex) { + ex.printStackTrace(); + } catch (InterruptedException ex) { + ex.printStackTrace(); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + + private static String translatePermissions(Object perms) { + int perm = ((Long) perms).intValue(); + + String pStr = ""; + + for (int i = 0; i < 3; i++) { + if ((perm & 1) != 0) { + pStr = "x" + pStr; + } else { + pStr = "-" + pStr; + } + if ((perm & 2) != 0) { + pStr = "w" + pStr; + } else { + pStr = "-" + pStr; + } + if ((perm & 4) != 0) { + pStr = "r" + pStr; + } else { + pStr = "-" + pStr; + } + perm = perm >> 3; + } + return pStr; + } + +} diff --git a/servers/test/org/xtreemfs/integrationtest/ExternalIntegrationTest.java b/servers/test/org/xtreemfs/integrationtest/ExternalIntegrationTest.java new file mode 100644 index 0000000000000000000000000000000000000000..108a58b6b9059819d9cd15ee74c6041eb7a6b51a --- /dev/null +++ b/servers/test/org/xtreemfs/integrationtest/ExternalIntegrationTest.java @@ -0,0 +1,405 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.integrationtest; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.RandomAccessFile; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.util.FSUtils; + +/** + * This test case externally tests the integration of all XtreemFS components at + * the vnode layer. It requires a complete XtreemFS infrastructure consisting of + * a Directory Service, at least one MRC, at least one OSD and the Access Layer + * with a local mountpoint. Moreover, a volume named "Test" has to exist. + *

+ * In order to set up a valid environment, take the following steps: + *

    + *
  • start a Directory Service (e.g. on localhost:32638) + *
  • start an OSD (e.g. on localhost:32637) + *
  • start an MRC (e.g. on localhost:32636) + *
  • create a directory for the XtreemFS root (e.g. /tmp/xtreemfs) + *
  • mount the access layer (e.g. + * xtreemfs -d -o volume_url=http://localhost:32636/Test,direct_io /tmp/xtreemfs) + *
  • create a volume "Test" (e.g. mkvol http://localhost:32636/Test) + *
  • change the 'xtreemFSMountPoint' variable to the mount point and compile + * this test case + *
+ * + * @author stender + * + */ +public class ExternalIntegrationTest extends TestCase { + + private static File xtreemFSMountPoint = new File("/tmp/xtreemfs"); + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." + + getName()); + + FSUtils.delTree(xtreemFSMountPoint); + xtreemFSMountPoint.mkdirs(); + } + + protected void tearDown() throws Exception { + + } + + /** + * Perform some sequential read/write operations on a file. + * + * @throws Exception + */ + public void testSeqReadWrite() throws Exception { + + // create a new file for sequential r/w access + File f = createFile(xtreemFSMountPoint, "/testfile.tmp"); + + FileOutputStream fout = new FileOutputStream(f); + fout.write(65); + fout.write(66); + fout.close(); + assertEquals(2, f.length()); + + FileInputStream fin = new FileInputStream(f); + assertEquals(65, fin.read()); + assertEquals(66, fin.read()); + assertEquals(-1, fin.read()); + fin.close(); + } + + /** + * Perform some random read/write operations on a file. 
+ * + * @throws Exception + */ + public void testRndReadWrite() throws Exception { + + File file = createFile(xtreemFSMountPoint, "/testfile2.tmp"); + + // create a new file for random r/w access + RandomAccessFile f2 = new RandomAccessFile(file, "rw"); + f2.writeBytes("Hello World!"); + assertEquals(12, f2.length()); + + f2.seek(4); + assertEquals('o', f2.read()); + f2.seek(6); + f2.write('w'); + f2.seek(6); + assertEquals('w', f2.read()); + f2.seek(6); + f2.writeBytes("XtreemFS!"); + f2.seek(0); + + byte[] chars = new byte[(int) f2.length()]; + f2.readFully(chars); + + assertEquals("Hello XtreemFS!", new String(chars)); + + f2.seek(16384); + f2.writeBytes("This is a string at offset 16384"); + + byte[] buf = new byte[2048]; + f2.seek(8192); + f2.read(buf); + for (int i = 0; i < buf.length; i++) + assertEquals(0, buf[i]); + + f2.seek(16384); + f2.read(buf); + assertEquals("This is a string at offset 16384", new String(buf, 0, 32)); + + f2.close(); + assertEquals(16416, file.length()); + } + + /** + * Read and write random strides. 
+ * + * @throws Exception + */ + public void testRndReadWrite2() throws Exception { + + final File file = createFile(xtreemFSMountPoint, "/testfile3.tmp"); + final RandomAccessFile raf = new RandomAccessFile(file, "rw"); + final int maxAccesses = 10000; + final int maxFileSize = 1024 * 1024; + final int maxNumberOfBytes = 1024 * 128; + final double readWriteRatio = .15; + + // allocate 10M + byte[] buf = new byte[maxFileSize]; + + for (int i = 0; i < maxAccesses; i++) { + + boolean write = Math.random() > readWriteRatio; + + int numberOfBytes = (int) (Math.random() * maxNumberOfBytes); + int offset = (int) (Math.random() * buf.length); + offset = Math.min(offset, buf.length - numberOfBytes); + byte[] stride = new byte[numberOfBytes]; + + if (write) { + // write a stride + + for (int j = 0; j < stride.length; j++) + stride[j] = (byte) (Math.random() * 256 - 128); + + raf.seek(offset); + raf.write(stride); + + System.arraycopy(stride, 0, buf, offset, stride.length); + + } else { + // read a stride + + raf.seek(offset); + raf.read(stride); + + for (int j = 0; j < stride.length; j++) + assertEquals(stride[j], buf[offset + j]); + } + } + + // finally, read and compare the complete file + byte[] readBuf = new byte[maxFileSize]; + raf.seek(0); + raf.read(readBuf); + + for (int j = 0; j < maxFileSize; j++) + assertEquals(buf[j], readBuf[j]); + + raf.close(); + } + + /** + * Create and delete some files and directories. 
+ * + * @throws Exception + */ + public void testCreateDelete() throws Exception { + + assertEquals(0, xtreemFSMountPoint.listFiles().length); + + // create a new directory + File dir1 = createDir(xtreemFSMountPoint, "/testDir"); + + // create a path of depth 3 in the root directory + File dir2 = createDir(xtreemFSMountPoint, "/someOtherDir"); + File dir3 = createDir(dir2, "/nestedDir"); + File dir4 = createDir(dir3, "/leafDir"); + + // delete the leaf directory + delete(dir4); + + // re-create the leaf directory + dir4 = createDir(dir4.getParentFile(), dir4.getName()); + + // create and test a tree of depth 5 with three children per node + createTree(dir1, 3, 3); + testTree(dir1, 3, 3); + + // create and delete a file in the root directory + File file1 = createFile(xtreemFSMountPoint, "testfile.tmp"); + + assertFalse(file1.createNewFile()); + delete(file1); + createFile(file1.getParentFile(), file1.getName()); + delete(file1); + + // create and delete a file in a sub directory + File file2 = createFile(new File(xtreemFSMountPoint + "/someOtherDir"), + "testfile.tmp"); + + assertFalse(file2.createNewFile()); + delete(file2); + createFile(file2.getParentFile(), file2.getName()); + delete(file2); + } + + public void testRename() throws Exception { + + // create a file and a directory + File sourceFile = createFile(xtreemFSMountPoint, "sourceFile.txt"); + File targetDir = createDir(xtreemFSMountPoint, "targetDir"); + + // move the file to the directory + File targetFile = new File(targetDir.getAbsolutePath() + + "/sourceFile.txt"); + assertTrue(sourceFile.renameTo(targetFile)); + + // afterwards, the target file should exist + assertTrue(targetFile.exists()); + + // ... 
and the source file should not exist anymore + assertFalse(sourceFile.exists()); + + // rename the target directory + File newTargetDir = new File(xtreemFSMountPoint, "newTargetDir"); + assertTrue(targetDir.renameTo(newTargetDir)); + + // afterwards, the former target directory should not exist anymore + assertFalse(targetDir.exists()); + + // ... and the new one should exist instead + assertTrue(newTargetDir.exists()); + + // ... and the nested file should have a new path + File newTargetFile = new File(newTargetDir, targetFile.getName()); + assertTrue(newTargetFile.exists()); + + // create a new directory and move the entire path to it while renaming + // the moved path + File topLevelTargetDir = createDir(xtreemFSMountPoint, "topLevelDir"); + File nestedDir = new File(topLevelTargetDir, "nestedDir"); + assertTrue(newTargetDir.renameTo(nestedDir)); + + // ... afterwards, the file should still exist in the nested directory + File nestedFile = new File(nestedDir, newTargetFile.getName()); + assertTrue(nestedFile.exists()); + + } + + public static void main(String[] args) { + + // if (args.length != 1) { + // System.out + // .println("usage: java " + // + ExternalIntegrationTest.class.getName() + // + " "); + // System.exit(1); + // } + // xtreemFSMountPoint = new File(args[0]); + + TestRunner.run(ExternalIntegrationTest.class); + } + + private File createDir(File parentDir, String name) { + + final long numberOfChildren = parentDir.list().length; + + // a new directory ... + File dir = new File(parentDir, name); + + // ... must not exist before + assertTrue(!dir.exists()); + + dir.mkdir(); + + // ... must exist afterwards + assertTrue(dir.exists()); + + // ... must be a directory + assertTrue(dir.isDirectory()); + + // ... must be an additional element in its parent directory + assertEquals(numberOfChildren + 1, parentDir.list().length); + + // ... 
must be an empty directory + assertEquals(0, dir.list().length); + + return dir; + } + + private File createFile(File parentDir, String name) throws Exception { + + final long numberOfChildren = parentDir.list().length; + + // a new file ... + File file = new File(parentDir, name); + + // ... must not exist before + assertTrue(!file.exists()); + + assertTrue(file.createNewFile()); + + // ... must exist afterwards + assertTrue(file.exists()); + + // ... must be a file + assertTrue(file.isFile()); + + // ... must be an additional element in its parent directory + assertEquals(numberOfChildren + 1, parentDir.list().length); + + // ... must not have any content + assertEquals(0, file.length()); + + return file; + } + + private void delete(File fileOrDir) { + + final long numberOfChildren = fileOrDir.getParentFile().list().length; + + // a file or directory that is deleted ... + + // ... must exist before + assertTrue(fileOrDir.exists()); + + assertTrue(fileOrDir.delete()); + + // ... must not exist afterwards + assertTrue(!fileOrDir.exists()); + + // ... 
must not exist in its parent directory anymore + assertEquals(numberOfChildren - 1, + fileOrDir.getParentFile().list().length); + } + + private void createTree(File root, int depth, int breadth) throws Exception { + + if (depth == -1) + return; + + for (int j = 0; j < breadth; j++) { + File f = new File(root, j + ""); + f.mkdir(); + createTree(f, depth - 1, breadth); + } + } + + private void testTree(File root, int depth, int breadth) throws Exception { + + if (depth == -1) + return; + + assertEquals(breadth, root.list().length); + + for (int j = 0; j < breadth; j++) { + File f = new File(root, j + ""); + assertTrue(f.exists()); + testTree(f, depth - 1, breadth); + } + + } +} diff --git a/servers/test/org/xtreemfs/test/AllTests.java b/servers/test/org/xtreemfs/test/AllTests.java new file mode 100644 index 0000000000000000000000000000000000000000..6ed65f4fcda4371cfdd7d86f7546333f7d89eeef --- /dev/null +++ b/servers/test/org/xtreemfs/test/AllTests.java @@ -0,0 +1,102 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Christian Lorenz (ZIB), Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test; + +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.Collection; +import java.util.LinkedList; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; +import junit.textui.TestRunner; + +import org.xtreemfs.common.logging.Logging; + + +/** + * A test suite consisting of all test cases from this package. + * + * @author stender + * + */ +public class AllTests extends TestSuite { + + public static Test suite() throws Exception { + + Logging.start(Logging.LEVEL_ERROR); + + TestSuite mySuite = new TestSuite("XtreemFS Test Suite"); + + Collection> testCases = new LinkedList(); + findRecursively(testCases, "."); + for (Class clazz : testCases) { + System.out.println("adding test '" + clazz.getName() + "'"); + mySuite.addTestSuite(clazz); + } + + return mySuite; + } + + protected static void findRecursively( + Collection> testCases, String root) + throws Exception { + + + System.out.println("ROOT: "+root); + + BufferedReader buf = new BufferedReader(new InputStreamReader( + (InputStream) AllTests.class.getResource(root) + .getContent())); + for (;;) { + String line = buf.readLine(); + if (line == null) + break; + if (line.endsWith("Test.class")) { + + String packageName = AllTests.class.getPackage().getName() + + root.substring(1).replace('/', '.'); + + Class clazz = Class.forName( + packageName + "." 
+ + line.substring(0, line.length() - ".class".length())) + .asSubclass(TestCase.class); + + testCases.add(clazz); + + } else if (!line.endsWith(".class") && !line.startsWith(".") + && (line.substring(1).indexOf(".") == -1)) { + findRecursively(testCases, root + "/" + line); + } + } + } + + public static void main(String[] args) throws Exception { + TestRunner.run(AllTests.suite()); + } + +} diff --git a/servers/test/org/xtreemfs/test/SetupUtils.java b/servers/test/org/xtreemfs/test/SetupUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..c43ae1f4dfb95a98a8aea2e6380bfd1670ba66c1 --- /dev/null +++ b/servers/test/org/xtreemfs/test/SetupUtils.java @@ -0,0 +1,353 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Christian Lorenz (ZIB), Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Properties; + +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.clients.mrc.MRCClient; +import org.xtreemfs.common.clients.osd.OSDClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.common.uuids.UUIDResolver; +import org.xtreemfs.dir.DIRConfig; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.pinky.SSLOptions; +import org.xtreemfs.mrc.MRCConfig; +import org.xtreemfs.osd.OSDConfig; + +/** + * + * @author bjko + */ +public class SetupUtils { + + public static final String TEST_DIR = "/tmp/xtreemfs-test"; + + public static final String CERT_DIR = "config/certs/"; + + public static boolean SSL_ON = false; + + public static final int DEBUG_LEVEL = Logging.LEVEL_DEBUG; + + public static OSDConfig createOSD1Config() throws IOException { + Properties props = new Properties(); + props.setProperty("dir_service.host", "localhost"); + props.setProperty("dir_service.port", "33638"); + props.setProperty("object_dir", TEST_DIR + "/osd0"); + props.setProperty("debug_level", "" + DEBUG_LEVEL); + props.setProperty("listen.port", "33637"); + //props.setProperty("listen.address", "localhost"); + props.setProperty("local_clock_renewal", "50"); + props.setProperty("remote_time_sync", "60000"); + props.setProperty("ssl.enabled", "" + SSL_ON); + props.setProperty("ssl.service_creds", CERT_DIR + "service2.jks"); + props.setProperty("ssl.service_creds.pw", "passphrase"); + props.setProperty("ssl.service_creds.container", "jks"); + props.setProperty("ssl.trusted_certs", CERT_DIR + "trust.jks"); + props.setProperty("ssl.trusted_certs.pw", "passphrase"); + 
props.setProperty("ssl.trusted_certs.container", "jks"); + props.setProperty("report_free_space", "true"); + props.setProperty("checksums.enabled", "true"); + props.setProperty("checksums.algorithm", "Adler32"); + props.setProperty("capability_secret", "secretPassphrase"); + props.setProperty("uuid", getOSD1UUID().toString()); + + return new OSDConfig(props); + } + + public static OSDConfig createOSD2Config() throws IOException { + Properties props = new Properties(); + props.setProperty("dir_service.host", "localhost"); + props.setProperty("dir_service.port", "33638"); + props.setProperty("object_dir", TEST_DIR + "/osd1"); + props.setProperty("debug_level", "" + DEBUG_LEVEL); + props.setProperty("listen.port", "33640"); + //props.setProperty("listen.address", "localhost"); + props.setProperty("local_clock_renewal", "50"); + props.setProperty("remote_time_sync", "60000"); + props.setProperty("ssl.enabled", "" + SSL_ON); + props.setProperty("ssl.service_creds", CERT_DIR + "service2.jks"); + props.setProperty("ssl.service_creds.pw", "passphrase"); + props.setProperty("ssl.service_creds.container", "jks"); + props.setProperty("ssl.trusted_certs", CERT_DIR + "trust.jks"); + props.setProperty("ssl.trusted_certs.pw", "passphrase"); + props.setProperty("ssl.trusted_certs.container", "jks"); + props.setProperty("report_free_space", "true"); + props.setProperty("checksums.enabled", "true"); + props.setProperty("checksums.algorithm", "Adler32"); + props.setProperty("capability_secret", "secretPassphrase"); + props.setProperty("uuid", getOSD2UUID().toString()); + + return new OSDConfig(props); + } + + public static OSDConfig createOSD3Config() throws IOException { + Properties props = new Properties(); + props.setProperty("dir_service.host", "localhost"); + props.setProperty("dir_service.port", "33638"); + props.setProperty("object_dir", TEST_DIR + "/osd2"); + props.setProperty("debug_level", "" + DEBUG_LEVEL); + props.setProperty("listen.port", "33641"); + 
props.setProperty("listen.address", "localhost"); + props.setProperty("local_clock_renewal", "50"); + props.setProperty("remote_time_sync", "60000"); + props.setProperty("ssl.enabled", "" + SSL_ON); + props.setProperty("ssl.service_creds", CERT_DIR + "service2.jks"); + props.setProperty("ssl.service_creds_pw", "passphrase"); + props.setProperty("ssl.service_creds_container", "jks"); + props.setProperty("ssl.trusted_certs", CERT_DIR + "trust.jks"); + props.setProperty("ssl.trusted_certs.pw", "passphrase"); + props.setProperty("ssl.trusted_certs.container", "jks"); + props.setProperty("report_free_space", "true"); + props.setProperty("checksums.enabled", "true"); + props.setProperty("checksums.algorithm", "Adler32"); + props.setProperty("capability_secret", "secretPassphrase"); + props.setProperty("uuid", getOSD3UUID().toString()); + + return new OSDConfig(props); + } + + public static DIRConfig createDIRConfig() throws IOException { + Properties props = new Properties(); + props.setProperty("database.dir", TEST_DIR); + props.setProperty("debug_level", "" + DEBUG_LEVEL); + props.setProperty("listen.port", "33638"); + props.setProperty("ssl.enabled", "" + SSL_ON); + props.setProperty("ssl.service_creds", CERT_DIR + "service3.jks"); + props.setProperty("ssl.service_creds.pw", "passphrase"); + props.setProperty("ssl.service_creds.container", "jks"); + props.setProperty("ssl.trusted_certs", CERT_DIR + "trust.jks"); + props.setProperty("ssl.trusted_certs.pw", "passphrase"); + props.setProperty("ssl.trusted_certs.container", "jks"); + props.setProperty("authentication_provider", "org.xtreemfs.common.auth.NullAuthProvider"); + + return new DIRConfig(props); + } + + public static MRCConfig createMRC1Config() throws IOException { + Properties props = new Properties(); + props.setProperty("dir_service.host", "localhost"); + props.setProperty("dir_service.port", "33638"); + props.setProperty("database.dir", TEST_DIR + "/mrc0"); + props.setProperty("database.log", TEST_DIR + 
"/test-brain0.log"); + props.setProperty("osd_check_interval", "10"); + props.setProperty("debug_level", "" + DEBUG_LEVEL); + props.setProperty("listen.port", "33636"); + props.setProperty("listen.address", "localhost"); + props.setProperty("no_atime", "true"); + props.setProperty("local_clock_renewal", "50"); + props.setProperty("remote_time_sync", "60000"); + props.setProperty("ssl.enabled", "" + SSL_ON); + props.setProperty("ssl.service_creds", CERT_DIR + "service1.jks"); + props.setProperty("ssl.service_creds.pw", "passphrase"); + props.setProperty("ssl.service_creds.container", "jks"); + props.setProperty("ssl.trusted_certs", CERT_DIR + "trust.jks"); + props.setProperty("ssl.trusted_certs.pw", "passphrase"); + props.setProperty("ssl.trusted_certs.container", "jks"); + props.setProperty("database.checkpoint.interval", "1800000"); + props.setProperty("database.checkpoint.idle_interval", "1000"); + props.setProperty("database.checkpoint.logfile_size", "16384"); + props.setProperty("authentication_provider", "org.xtreemfs.common.auth.NullAuthProvider"); + props.setProperty("capability_secret", "secretPassphrase"); + props.setProperty("uuid", getMRC1UUID().toString()); + + return new MRCConfig(props); + } + + public static MRCConfig createMRC2Config() throws IOException { + Properties props = new Properties(); + props.setProperty("dir_service.host", "localhost"); + props.setProperty("dir_service.port", "33638"); + props.setProperty("database.dir", TEST_DIR + "/mrc1"); + props.setProperty("database.log", TEST_DIR + "/test-brain1.log"); + props.setProperty("osd_check_interval", "10"); + props.setProperty("debug_level", "" + DEBUG_LEVEL); + props.setProperty("listen.port", "33639"); + props.setProperty("listen.address", "localhost"); + props.setProperty("no_atime", "true"); + props.setProperty("local_clock_renewal", "50"); + props.setProperty("remote_time_sync", "60000"); + props.setProperty("ssl.enabled", "" + SSL_ON); + props.setProperty("ssl.service_creds", 
CERT_DIR + "service1.jks"); + props.setProperty("ssl.service_creds.pw", "passphrase"); + props.setProperty("ssl.service_creds.container", "jks"); + props.setProperty("ssl.trusted_certs", CERT_DIR + "trust.jks"); + props.setProperty("ssl.trusted_certs.pw", "passphrase"); + props.setProperty("ssl.trusted_certs.container", "jks"); + props.setProperty("database.checkpoint.interval", "1800000"); + props.setProperty("database.checkpoint.idle_interval", "1000"); + props.setProperty("database.checkpoint.logfile_size", "16384"); + props.setProperty("authentication_provider", "org.xtreemfs.common.auth.NullAuthProvider"); + props.setProperty("capability_secret", "secretPassphrase"); + props.setProperty("uuid", getMRC2UUID().toString()); + + return new MRCConfig(props); + } + + public static InetSocketAddress getMRC1Addr() { + return new InetSocketAddress("localhost", 33636); + } + + public static InetSocketAddress getMRC2Addr() { + return new InetSocketAddress("localhost", 33639); + } + + public static InetSocketAddress getOSD1Addr() { + return new InetSocketAddress("localhost", 33637); + } + + public static InetSocketAddress getOSD2Addr() { + return new InetSocketAddress("localhost", 33640); + } + + public static InetSocketAddress getOSD3Addr() { + return new InetSocketAddress("localhost", 33641); + } + + public static InetSocketAddress getDIRAddr() { + return new InetSocketAddress("localhost", 33638); + } + + public static ServiceUUID getMRC1UUID() { + return new ServiceUUID("UUID:localhost:33636"); + } + + public static ServiceUUID getMRC2UUID() { + return new ServiceUUID("UUID:localhost:33639"); + } + + public static ServiceUUID getOSD1UUID() { + return new ServiceUUID("UUID:localhost:33637"); + } + + public static ServiceUUID getOSD2UUID() { + return new ServiceUUID("UUID:localhost:33640"); + } + + public static ServiceUUID getOSD3UUID() { + return new ServiceUUID("UUID:localhost:33641"); + } + + public static void setupLocalResolver() throws IOException, JSONException { 
+ TimeSync.initialize(null, 100000, 50, ""); + UUIDResolver.shutdown(); + + UUIDResolver.start(null, 1000, 1000); + UUIDResolver.addLocalMapping(getMRC1UUID(), 33636, SSL_ON); + UUIDResolver.addLocalMapping(getMRC2UUID(), 33639, SSL_ON); + UUIDResolver.addLocalMapping(getOSD1UUID(), 33637, SSL_ON); + UUIDResolver.addLocalMapping(getOSD2UUID(), 33640, SSL_ON); + UUIDResolver.addLocalMapping(getOSD3UUID(), 33641, SSL_ON); + } + + + public static MRCClient createMRCClient(int timeout) throws IOException { + return SSL_ON ? new MRCClient(timeout, new SSLOptions(CERT_DIR + "client1.p12", + "passphrase", SSLOptions.PKCS12_CONTAINER, CERT_DIR + "trust.jks", "passphrase", + SSLOptions.JKS_CONTAINER, false)) : new MRCClient(); + } + + public static OSDClient createOSDClient(int timeout) throws IOException { + return SSL_ON ? new OSDClient(timeout, new SSLOptions(CERT_DIR + "client1.p12", + "passphrase", SSLOptions.PKCS12_CONTAINER, CERT_DIR + "trust.jks", "passphrase", + SSLOptions.JKS_CONTAINER, false)) : new OSDClient(null); + } + + public static DIRClient createDIRClient(int timeout) throws IOException { + return SSL_ON ? 
new DIRClient(new InetSocketAddress("localhost", 33638), new SSLOptions( + CERT_DIR + "client1.p12", "passphrase", SSLOptions.PKCS12_CONTAINER, CERT_DIR + + "trust.jks", "passphrase", SSLOptions.JKS_CONTAINER, false), timeout) + : new DIRClient(null, new InetSocketAddress("localhost", 33638)); + } + + public static OSDConfig createOSD1ConfigForceWithoutSSL() throws IOException { + boolean tmp = SSL_ON; + SSL_ON = false; + OSDConfig config = createOSD1Config(); + SSL_ON = tmp; + return config; + } + + public static OSDConfig createOSD2ConfigForceWithoutSSL() throws IOException { + boolean tmp = SSL_ON; + SSL_ON = false; + OSDConfig config = createOSD2Config(); + SSL_ON = tmp; + return config; + } + + public static OSDConfig createOSD3ConfigForceWithoutSSL() throws IOException { + boolean tmp = SSL_ON; + SSL_ON = false; + OSDConfig config = createOSD3Config(); + SSL_ON = tmp; + return config; + } + + public static MRCConfig createMRC1ConfigForceWithoutSSL() throws IOException { + boolean tmp = SSL_ON; + SSL_ON = false; + MRCConfig config = createMRC1Config(); + SSL_ON = tmp; + return config; + } + + public static MRCConfig createMRC2ConfigForceWithoutSSL() throws IOException { + boolean tmp = SSL_ON; + SSL_ON = false; + MRCConfig config = createMRC2Config(); + SSL_ON = tmp; + return config; + } + + public static DIRConfig createDIRConfigForceWithoutSSL() throws IOException { + boolean tmp = SSL_ON; + SSL_ON = false; + DIRConfig config = createDIRConfig(); + SSL_ON = tmp; + return config; + } + + public static DIRClient initTimeSync() throws IOException, JSONException { + + try { + TimeSync.getInstance(); + return null; + + } catch (RuntimeException ex) { + // no time sync there, start one + DIRClient dirClient = SetupUtils.createDIRClient(10000); + TimeSync.initialize(dirClient, 60000, 50, NullAuthProvider.createAuthString("bla", + "bla")); + + return dirClient; + } + } +} diff --git a/servers/test/org/xtreemfs/test/common/CapabilityTest.java 
b/servers/test/org/xtreemfs/test/common/CapabilityTest.java new file mode 100644 index 0000000000000000000000000000000000000000..c51a9856c7e1919311ad00f3c32d29af83d34c35 --- /dev/null +++ b/servers/test/org/xtreemfs/test/common/CapabilityTest.java @@ -0,0 +1,82 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.common; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.test.SetupUtils; + +public class CapabilityTest extends TestCase { + + private static final String SECRET = "secret"; + + protected void setUp() throws Exception { + Logging.start(SetupUtils.DEBUG_LEVEL); + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + } + + protected void tearDown() throws Exception { + } + + public void testCapability() throws Exception { + + // create and test capability that is valid for an hour + Capability cap = new Capability("bla:1", "r", System.currentTimeMillis() / 1000 + 3600, 0, + SECRET); + + assertTrue(cap.isValid()); + assertEquals(cap.getFileId(), "bla:1"); + assertEquals(cap.getAccessMode(), "r"); + + // create and test a valid capability that is parsed from + // a string representation + String capAsString = cap.toString(); + + Capability cap2 = new Capability(capAsString, SECRET); + assertTrue(cap2.isValid()); + assertEquals(cap2.getFileId(), "bla:1"); + assertEquals(cap2.getAccessMode(), "r"); + + // assert that a capability is invalid if the signature is invalid + String fakedCap = capAsString.substring(0, capAsString.length() - 35) + + "\"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\"]"; + Capability cap3 = new Capability(fakedCap, SECRET); + assertFalse(cap3.isValid()); + + // assert that a capability is invalid if it has timed out + Capability cap4 = new Capability("bla:2", "w", System.currentTimeMillis() / 1000 - 3600, 0, + SECRET); + assertFalse(cap4.isValid()); + + } + + public static void main(String[] args) { + TestRunner.run(CapabilityTest.class); + } + +} diff --git a/servers/test/org/xtreemfs/test/common/JSONParserTest.java b/servers/test/org/xtreemfs/test/common/JSONParserTest.java new file mode 100644 index 0000000000000000000000000000000000000000..1eb7cead751961559bffecb2ea55483806ceec97 --- /dev/null +++ b/servers/test/org/xtreemfs/test/common/JSONParserTest.java @@ -0,0 +1,204 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.common; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import junit.framework.TestCase; +import junit.textui.TestRunner; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.json.JSONCharBufferString; + +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.test.SetupUtils; + +public class JSONParserTest extends TestCase { + + protected void setUp() throws Exception { + Logging.start(SetupUtils.DEBUG_LEVEL); + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + } + + protected void tearDown() { + } + + public void testPrimitiveEncode() throws Exception { + + String json = JSONParser.writeJSON(1711); + assertEquals(json, "1711"); + + json = JSONParser.writeJSON(-12); + assertEquals(json, "-12"); + + json = JSONParser.writeJSON("this is a string"); + assertEquals(json, "\"this is a string\""); + + json = JSONParser.writeJSON(null); + assertEquals(json, "null"); + + json = JSONParser.writeJSON(true); + assertEquals(json, "true"); + + json = JSONParser.writeJSON(false); + assertEquals(json, "false"); + } + + public void testPrimitiveDecode() throws Exception { + + int i = ((Long) JSONParser.parseJSON(new JSONString("17132"))) + .intValue(); + assertEquals(i, 17132); + + int i2 = ((Long) JSONParser.parseJSON(new JSONString("-7327"))) + .intValue(); + assertEquals(i2, -7327); + + String s = (String) JSONParser.parseJSON(new JSONString( + "\"blubberbla\"")); + assertEquals(s, "blubberbla"); + + s = (String) JSONParser.parseJSON(new JSONString( + "\"\\\\\"")); + assertEquals(s, "\\"); + + boolean b = (Boolean) JSONParser.parseJSON(new JSONString("true")); + assertTrue(b); + } + + public void testMapEncode() throws Exception { + + Map someMap = new HashMap(); + someMap.put("bla", 438); + someMap.put("blub", -321); + + String s = JSONParser.writeJSON(someMap); + assertEquals(s.charAt(0), '{'); + assertEquals(s.charAt(s.length() - 1), '}'); + assertTrue(s.indexOf("\"bla\":438") != -1); + assertTrue(s.indexOf("\"blub\":-321") != -1); + } + + public void testMapDecode() throws Exception { + + String json = "{\"foo\":\"bar\",\"bla\":482,\"4\":-3}"; + Map map = (Map) JSONParser.parseJSON(new JSONString(json)); + assertEquals(map.get("foo"), "bar"); + assertEquals(((Long) map.get("bla")).intValue(), 482); + assertEquals(map.get("4"), new Long(-3)); + assertEquals(map.size(), 3); + } + + public void testListEncode() throws Exception { + + List list = new ArrayList(); + list.add("bla"); + list.add(327); + list.add(-5); + 
list.add(false); + list.add("blub"); + + String s = JSONParser.writeJSON(list); + assertEquals(s, "[\"bla\",327,-5,false,\"blub\"]"); + } + + public void testListDecode() throws Exception { + + String json = "[\"bla\",32,-3]"; + List l = (List) JSONParser.parseJSON(new JSONString(json)); + assertEquals(l.get(0), "bla"); + assertEquals(((Long) l.get(1)).intValue(), 32); + assertEquals(((Long) l.get(2)).intValue(), -3); + assertEquals(l.size(), 3); + + } + + public void testComplexEncode() throws Exception { + + ArrayList nestedList = new ArrayList(); + ArrayList nestedList2 = new ArrayList(); + nestedList2.add("bla"); + nestedList2.add(32); + nestedList2.add(-12); + nestedList2.add("blub"); + + Map map = new HashMap(); + map.put(32, "blub"); + map.put("bla", nestedList); + map.put("bar", nestedList2); + map.put("test", false); + + // encode + List list = new ArrayList(); + list.add(map); + list.add(12); + list.add("blub"); + list.add(new ArrayList()); + list.add(nestedList2); + + String json = JSONParser.writeJSON(list); + + // String l2Str = "[\"bla\",32,-12,\"blub\"]"; + // assertEquals(json, "[{32:\"blub\",\"bla\":[],\"bar\":" + l2Str + + // ",\"test\":false},12,\"blub\",[]," + l2Str + "]"); + } + + public void testComplexDecode() throws Exception { + + String json = "[43,\"bla\",[],{},[43,{}],{\"43\":32,\"ertz\":{}}]"; + List l = (List) JSONParser.parseJSON(new JSONString(json)); + assertEquals(((Long) l.get(0)).intValue(), 43); + assertTrue(((List) l.get(2)).isEmpty()); + assertTrue(((Map) l.get(3)).isEmpty()); + assertEquals(((Map) l.get(5)).size(), 2); + + json = "[[{\"atime\":1169209166,\"isDirectory\":false,\"userId\":1,\"name\":\"test.txt\",\"mtime\":1169209166,\"ctime\":1169209166,\"size\":0},[{\"userId\":1,\"value\":\"ertz\",\"type\":1,\"key\":\"ref\"}],[],[]],\"testVolume2\\/newTest.txt\"]"; + l = (List) JSONParser.parseJSON(new JSONString(json)); + assertEquals(((Map) ((List) l.get(0)).get(0)).get("isDirectory"), false); + } + + public void 
testCharBufferInput() throws Exception { + + String json = "[43,\"bla\",[],{},[43,{}],{\"43\":32,\"ertz\":{}}]"; + ByteBuffer bb = ByteBuffer.wrap(json.getBytes(HTTPUtils.ENC_UTF8)); + CharBuffer cb = HTTPUtils.ENC_UTF8.decode(bb); + List l = (List) JSONParser.parseJSON(new JSONCharBufferString(cb)); + assertEquals(((Long) l.get(0)).intValue(), 43); + assertEquals(((String) l.get(1)), "bla"); + assertTrue(((List) l.get(2)).isEmpty()); + assertTrue(((Map) l.get(3)).isEmpty()); + assertEquals(((Map) l.get(5)).size(), 2); + } + + public static void main(String[] args) { + TestRunner.run(JSONParserTest.class); + } +} diff --git a/servers/test/org/xtreemfs/test/common/RPCClientTest.java b/servers/test/org/xtreemfs/test/common/RPCClientTest.java new file mode 100644 index 0000000000000000000000000000000000000000..9c387946ccd1c8520995769217df13a8fb6aea94 --- /dev/null +++ b/servers/test/org/xtreemfs/test/common/RPCClientTest.java @@ -0,0 +1,232 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.common; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.channels.UnresolvedAddressException; +import java.util.concurrent.atomic.AtomicBoolean; +import junit.framework.*; +import org.xtreemfs.common.clients.HttpErrorException; +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.RPCResponseListener; +import org.xtreemfs.common.logging.Logging; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; + +/** + * + * @author bjko + */ +public class RPCClientTest extends TestCase { + + public static final int PORT = 12345; + + public static final int PORT500 = 12346; + + public static final int PORTWAIT = 12347; + + HttpServer serverOK, server500, serverwait; + + RPCClient client; + + public RPCClientTest(String testName) { + super(testName); + Logging.start(Logging.LEVEL_DEBUG); + } + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ + getName()); + + serverOK = HttpServer.create(new InetSocketAddress("localhost", PORT), + 0); + serverOK.createContext("/", new HttpHandler() { + public void handle(HttpExchange httpExchange) throws IOException { + byte[] content = "simpleContents".getBytes("ascii"); + Logging.logMessage(Logging.LEVEL_DEBUG,this,"request received: "); + httpExchange.sendResponseHeaders(200,content.length); + httpExchange.getResponseBody().write(content); + httpExchange.getResponseBody().close(); + } + }); + serverOK.start(); + + server500 = HttpServer.create(new InetSocketAddress("localhost", + PORT500), 0); + server500.createContext("/", new HttpHandler() { + public void handle(HttpExchange httpExchange) throws IOException { + byte[] content = "simpleContents".getBytes("ascii"); + Logging.logMessage(Logging.LEVEL_DEBUG,this,"request received: "); + httpExchange.sendResponseHeaders(500,content.length); + httpExchange.getResponseBody().write(content); + httpExchange.getResponseBody().close(); + } + }); + server500.start(); + + serverwait = HttpServer.create(new InetSocketAddress("localhost", + PORTWAIT), 0); + serverwait.createContext("/", new HttpHandler() { + public void handle(HttpExchange httpExchange) throws IOException { + Logging.logMessage(Logging.LEVEL_DEBUG,this,"request received: "); + synchronized (this) { + try { + this.wait(5000); + } catch (InterruptedException ex) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, ex); + } + } + Logging.logMessage(Logging.LEVEL_DEBUG,this,"request answered "); + byte[] content = "simpleContents".getBytes("ascii"); + httpExchange.sendResponseHeaders(500, content.length); + httpExchange.getResponseBody().write(content); + httpExchange.getResponseBody().close(); + } + }); + serverwait.start(); + + client = new RPCClient(null, 2000); + } + + protected void tearDown() throws Exception { + client.shutdown(); + client.waitForShutdown(); + serverOK.stop(0); + server500.stop(0); + serverwait.stop(0); + } + + // TODO add test methods here. 
The name must begin with 'test'. For example: + // public void testHello() {} + + public void testErrorCases() throws Exception { + + RPCResponse rp = null; + try { + InetSocketAddress nonexiting = new InetSocketAddress( + "yabba-brabbel.zib.de", 80); + rp = client.sendRPC(nonexiting, "bla", null, "bla", null); + rp.waitForResponse(); + fail("IOException should have been thrown."); + } catch (UnresolvedAddressException ex) { + } finally { + if (rp != null) + rp.freeBuffers(); + } + + InetSocketAddress local = new InetSocketAddress("localhost", PORT); + rp = client.sendRPC(local, "/bla", null, "bla", null); + assertEquals(rp.getStatusCode(), 200); + rp.freeBuffers(); + + InetSocketAddress local500 = null; + try { + local500 = new InetSocketAddress("localhost",PORT500); + rp = client.sendRPC(local500,"/bla",null,"bla",null); + rp.waitForResponse(); + fail("HttpErrorException should have been thrown."); + } catch (HttpErrorException ex) { + assertEquals(ex.getStatusCode(), 500); + } finally { + if (rp != null) + rp.freeBuffers(); + } + + InetSocketAddress localWait = null; + try { + localWait = new InetSocketAddress("localhost",PORTWAIT); + rp = client.sendRPC(localWait,"/bla",null,"bla",null); + rp.waitForResponse(); + fail("IOException should have been thrown."); + } catch (IOException ex) { + } finally { + if (rp != null) + rp.freeBuffers(); + } + + rp = client.sendRPC(local,"/bla",null,"bla",null); + final AtomicBoolean hasResponse = new AtomicBoolean(false); + final Object me = this; + rp.setResponseListener(new RPCResponseListener() { + + @Override + public void responseAvailable(RPCResponse response) { + hasResponse.set(true); + synchronized (me) { + me.notify(); + } + } + }); + synchronized (this) { + try { + this.wait(1000); + + } catch (InterruptedException interruptedException) { + } + + } + assertTrue(hasResponse.get()); + + rp = client.sendRPC(localWait,"/bla",null,"bla",null); + final AtomicBoolean hasNoResponse = new AtomicBoolean(true); + 
rp.setResponseListener(new RPCResponseListener() { + + @Override + public void responseAvailable(RPCResponse response) { + hasNoResponse.set(false); + synchronized (me) { + me.notify(); + } + } + }); + synchronized (this) { + try { + this.wait(500); + + } catch (InterruptedException interruptedException) { + } + + } + rp.freeBuffers(); + assertTrue(hasNoResponse.get()); + System.out.println("wait for response!"); + synchronized (this) { + try { + this.wait(10000); + + } catch (InterruptedException interruptedException) { + interruptedException.printStackTrace(); + } + + } + System.out.println("waiting done"); + + } +} diff --git a/servers/test/org/xtreemfs/test/common/RingBufferTest.java b/servers/test/org/xtreemfs/test/common/RingBufferTest.java new file mode 100644 index 0000000000000000000000000000000000000000..230ac7688cf9219afcd6806947d5529bc92bbb20 --- /dev/null +++ b/servers/test/org/xtreemfs/test/common/RingBufferTest.java @@ -0,0 +1,57 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. 
+ */ + +package org.xtreemfs.test.common; + +import java.util.Iterator; +import junit.framework.TestCase; +import org.xtreemfs.common.RingBuffer; + +/** + * + * @author bjko + */ +public class RingBufferTest extends TestCase { + + public RingBufferTest(String testName) { + super(testName); + } + + @Override + protected void setUp() throws Exception { + super.setUp(); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + } + + public void testRingBuffer() throws Exception { + + RingBuffer b = new RingBuffer(100); + for (long i = 1; i < 101; i++) { + b.insert(i); + } + Iterator iter = b.iterator(); + for (long i = 1; i < 101; i++) { + assertTrue(iter.hasNext()); + assertEquals(i, iter.next().longValue()); + } + assertFalse(iter.hasNext()); + System.out.println(b); + + b.insert(101l); + iter = b.iterator(); + for (long i = 2; i < 102; i++) { + assertTrue(iter.hasNext()); + assertEquals(i, iter.next().longValue()); + } + assertFalse(iter.hasNext()); + System.out.println(b); + + } + +} diff --git a/servers/test/org/xtreemfs/test/common/checksums/ChecksumFactoryTest.java b/servers/test/org/xtreemfs/test/common/checksums/ChecksumFactoryTest.java new file mode 100644 index 0000000000000000000000000000000000000000..dcddea3ad6fa031d93d111080a9add4f97f44867 --- /dev/null +++ b/servers/test/org/xtreemfs/test/common/checksums/ChecksumFactoryTest.java @@ -0,0 +1,289 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.test.common.checksums; + +import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.LinkedList; +import java.util.Random; +import java.util.concurrent.Callable; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.zip.Adler32; +import java.util.zip.Checksum; + +import junit.framework.TestCase; + +import org.junit.After; +import org.junit.Before; +import org.xtreemfs.common.checksums.ChecksumAlgorithm; +import org.xtreemfs.common.checksums.ChecksumFactory; +import org.xtreemfs.common.checksums.ChecksumProvider; +import org.xtreemfs.common.checksums.algorithms.SDBM; +import org.xtreemfs.common.checksums.provider.JavaChecksumProvider; +import org.xtreemfs.common.logging.Logging; + +/** + * tests the checksum factory and some checksums + * + * 19.08.2008 + * + * @author clorenz + */ +public class ChecksumFactoryTest extends TestCase { + private ChecksumFactory factory; + private ByteBuffer data; + + @Before + public void setUp() throws Exception { + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ + getName()); + Logging.start(Logging.LEVEL_ERROR); + + this.factory = ChecksumFactory.getInstance(); + + ChecksumProvider provider = new JavaChecksumProvider(); + this.factory.addProvider(provider); + + this.data = ByteBuffer.wrap(generateRandomBytes(1024 * 128)); + } + + @After + public void tearDown() throws Exception { + } + + /** + * generates randomly filled byte-array + * + * @param length + * of the byte-array + */ + public static byte[] generateRandomBytes(int length) { + Random r = new Random(); + byte[] bytes = new byte[length]; + + r.nextBytes(bytes); + return bytes; + } + + /** + * tests the internal java checksum algorithms + * @throws Exception + */ + public void testJavaChecksumAlgorithm() throws Exception { + // compute checksum with xtreemfs ChecksumFactory + String xtreemfsValue = computeXtreemfsChecksum("Adler32", true); + + // compute checksum with java API + Checksum javaAlgorithm = new Adler32(); + javaAlgorithm.update(data.array(), 0, data.array().length); + String javaValue = Long.toHexString(javaAlgorithm.getValue()); + + // System.out.println(javaValue); + // System.out.println(xtreemfsValue); + + assertEquals(javaValue, xtreemfsValue); + } + + /** + * tests the internal java message digest algorithms + * @throws Exception + */ + public void testJavaMessageDigestAlgorithm() throws Exception { + // compute checksum with xtreemfs ChecksumFactory + String xtreemfsValue = computeXtreemfsChecksum("MD5", true); + + // compute checksum with java API + String javaValue = computeJavaMessageDigest("MD5"); + +// System.out.println("java: "+xtreemfsValue); +// System.out.println("xtreemfs: "+javaValue.toString()); + + assertEquals(javaValue.toString(), xtreemfsValue); + } + + /** + * @param algorithm TODO + * @param returnAlgorithm TODO + * @return + * @throws NoSuchAlgorithmException + */ + private String computeXtreemfsChecksum(String algorithm, boolean returnAlgorithm) throws NoSuchAlgorithmException { + // compute checksum with xtreemfs 
ChecksumFactory + ChecksumAlgorithm xtreemfsAlgorithm = factory.getAlgorithm(algorithm); + xtreemfsAlgorithm.update(data); + String xtreemfsValue = xtreemfsAlgorithm.getValue(); + if(returnAlgorithm) + this.factory.returnAlgorithm(xtreemfsAlgorithm); + return xtreemfsValue; + } + + /** + * @param algorithm TODO + * @return + * @throws NoSuchAlgorithmException + */ + private String computeJavaMessageDigest(String algorithm) + throws NoSuchAlgorithmException { + // compute checksum with java API + MessageDigest javaAlgorithm = MessageDigest.getInstance(algorithm); + javaAlgorithm.update(data.array()); + byte[] javaHash = javaAlgorithm.digest(); + StringBuffer javaValue = new StringBuffer(); + for (int i = 0; i < javaHash.length; i++) { + javaValue.append(Integer.toHexString(0xFF & javaHash[i])); + } + return javaValue.toString(); + } + + /** + * tests, if the internal buffer of the checksums is working correctly, + * if the checksum is used more than once + * @throws Exception + */ + public void testIfChecksumIsAlwaysTheSame() throws Exception { + ChecksumAlgorithm algorithm = factory.getAlgorithm("SHA-1"); + algorithm.update(data); + String oldValue = algorithm.getValue(); + + for(int i=0; i<32; i++){ + algorithm.update(data); + String newValue = algorithm.getValue(); + + assertEquals(oldValue, newValue); + oldValue = newValue; + } + } + + /** + * tests, if the ChecksumFactory delivers only "thread-safe" instances (cache-pool) + * @throws Exception + */ + public void testThreadSafety() throws Exception { + final int THREADS = 8; + this.data = ByteBuffer.wrap(generateRandomBytes(1024 * 1024 * 32)); + + // compute correct checksum with java API + String javaValue = computeJavaMessageDigest("SHA-1"); + + Callable computation = new Callable(){ + @Override + public String call() { + try { + // compute checksum with xtreemfs ChecksumFactory + String xtreemfsValue = computeXtreemfsChecksum("SHA-1", true); + return xtreemfsValue; + } catch (NoSuchAlgorithmException e) { + 
// TODO Auto-generated catch block + e.printStackTrace(); + return null; + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + return null; + } + } + }; + LinkedList> results = useMultipleThreads(THREADS, computation); + + // compare correct java checksum with xtreemfs checksums + for(Future result : results){ + assertEquals(javaValue, result.get()); + } + } + + /** + * tests, if the ChecksumFactory cache-pool works correctly + * @throws Exception + */ + public void testChecksumFactoryCache() throws Exception { + // FIXME: use bigger values for more comprehensive testing, but this will slow down the test + final int THREADS = 8; + final int ROUNDS = 50; + this.data = ByteBuffer.wrap(generateRandomBytes(1024 * 1024)); + + // compute correct checksum with java API + String javaValue = computeJavaMessageDigest("SHA-1"); + + Callable> computation = new Callable>(){ + @Override + public LinkedList call() { + try { + LinkedList values = new LinkedList(); + boolean returning = false; + for(int i=0; i>> results = useMultipleThreads(THREADS, computation); + + // compare correct java checksum with xtreemfs checksums + for(Future> result : results){ + for(String value : result.get()){ + assertEquals(javaValue, value); + } + } + } + + /** + * executes a given computation in a couple of threads + * and returns the results of the computations + * + * @param THREADS + * @param computation TODO + * @return a list of futures, which contain the results of the computations + * @throws InterruptedException + */ + private LinkedList> useMultipleThreads(final int THREADS, Callable computation) + throws InterruptedException { + LinkedList> results = new LinkedList>(); + // compute xtreemfs checksums with multiple threads + ExecutorService executor = Executors.newFixedThreadPool(THREADS); + for(int i=0; i tmp = executor.submit(computation); + results.add(tmp); + } + executor.shutdown(); + executor.awaitTermination(60,TimeUnit.SECONDS); + return results; + 
} +} diff --git a/servers/test/org/xtreemfs/test/common/checksums/StringChecksumAlgorithmTest.java b/servers/test/org/xtreemfs/test/common/checksums/StringChecksumAlgorithmTest.java new file mode 100644 index 0000000000000000000000000000000000000000..303d295b0fa8f9e0cd24309b94d5f1578d59d591 --- /dev/null +++ b/servers/test/org/xtreemfs/test/common/checksums/StringChecksumAlgorithmTest.java @@ -0,0 +1,86 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.test.common.checksums; + +import java.nio.ByteBuffer; + +import junit.framework.TestCase; + +import org.junit.After; +import org.junit.Before; +import org.xtreemfs.common.checksums.StringChecksumAlgorithm; +import org.xtreemfs.common.checksums.algorithms.SDBM; +import org.xtreemfs.common.logging.Logging; + +/** + * some tests for the checksum algorithms, which are based on strings + * + * 02.09.2008 + * + * @author clorenz + */ +public class StringChecksumAlgorithmTest extends TestCase { + private ByteBuffer bufferData; + private String stringData; + + @Before + public void setUp() throws Exception { + System.out.println("TEST: " + getClass().getSimpleName() + "." + + getName()); + Logging.start(Logging.LEVEL_ERROR); + + this.stringData = ""; + for(int i=0; i<1024; i++){ + this.stringData += "Test, "; + } + this.bufferData = ByteBuffer.wrap(stringData.getBytes()); + } + + @After + public void tearDown() throws Exception { + } + + /** + * tests, if the SDBM algorithm generates the same checksum with + * a String-input and ByteBuffer-input + * @throws Exception + */ + public void testSDBMStringBufferEquality() throws Exception { + // compute checksum with xtreemfs ChecksumFactory + StringChecksumAlgorithm algorithm = new SDBM(); + + // string + algorithm.digest(stringData); + String stringValue = algorithm.getValue(); + + // buffer + algorithm.update(bufferData); + String bufferValue = algorithm.getValue(); + +// System.out.println(stringValue); +// System.out.println(bufferValue); + + assertEquals(stringValue, bufferValue); + } +} diff --git a/servers/test/org/xtreemfs/test/common/striping/LocationTest.java b/servers/test/org/xtreemfs/test/common/striping/LocationTest.java new file mode 100644 index 0000000000000000000000000000000000000000..51eb9b8aff79005be4b8caeef27c43085304bc2b --- /dev/null +++ b/servers/test/org/xtreemfs/test/common/striping/LocationTest.java @@ -0,0 +1,336 @@ +/* Copyright 
(c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion and + Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), + * Eugenio Cesario (CNR) + */ + +package org.xtreemfs.test.common.striping; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.RAID0; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.test.SetupUtils; + +/** + * It tests the Location class + * + * @author Jesus Malo (jmalo) + */ +public class LocationTest extends TestCase { + + private List osdList; + private StripingPolicy stripingPolicy; + + /** Creates a new instance of LocationTest */ + public LocationTest(String testName) { + super(testName); + Logging.start(SetupUtils.DEBUG_LEVEL); + } + + protected void setUp() throws Exception { + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ + getName()); + + final int numberOfOSDs = 3; + this.stripingPolicy = new RAID0(1, numberOfOSDs); + osdList = new ArrayList(numberOfOSDs); + osdList.add(new ServiceUUID("http://www.google.com:80")); + osdList.add(new ServiceUUID("http://www.yahoo.com:80")); + osdList.add(new ServiceUUID("http://www.ozu.com:80")); + } + + protected void tearDown() throws Exception { + } + + /** + * It tests the creation of objects + */ + public void testCreateLocation() throws Exception { + List[] lo = { null, new ArrayList(), osdList }; + StripingPolicy[] stripingPolicies = new StripingPolicy[] { null, + this.stripingPolicy }; + + // Wrong use cases + try { + // Null sp and list of OSDs + Location tester = new Location(stripingPolicies[0], lo[0]); + fail(); + } catch (IllegalArgumentException e) { + } + + try { + // Null sp + Location tester = new Location(stripingPolicies[0], lo[1]); + fail(); + } catch (IllegalArgumentException e) { + } + + try { + // Null list of OSDs + Location tester = new Location(stripingPolicies[1], lo[0]); + fail(); + } catch (IllegalArgumentException e) { + } + + try { + // Wrong matching of sp and list of OSDs + Location tester = new Location(stripingPolicies[1], lo[1]); + fail(); + } catch (IllegalArgumentException e) { + } + + // Right use case + Location tester = new Location(stripingPolicies[1], lo[2]); + + // Right use case + List validList = tester.asList(); + Location tester2 = new Location(validList); + + // Preparing for the second constructor + List invalidList2 = new ArrayList(); + List invalidList3 = new ArrayList(); + invalidList3.add(validList.get(1)); + invalidList3.add(null); + List invalidList4 = new ArrayList(); + invalidList4.add(validList.get(0)); + invalidList4.add(new ArrayList()); + List invalidList5 = new ArrayList(); + List listOSDs = (List) validList.get(1); + listOSDs.remove(0); + invalidList5.add(validList.get(0)); + invalidList5.add(listOSDs); + + // Wrong use cases + try { + // Wrong number of arguments + Location tester3 
= new Location(invalidList2); + fail(); + } catch (IllegalArgumentException e) { + } + + try { + // Wrong first argument + Location tester3 = new Location(invalidList3); + fail(); + } catch (IllegalArgumentException e) { + } catch (ClassCastException e) { + } + + try { + // Wrong second argument + Location tester3 = new Location(invalidList4); + fail(); + } catch (IllegalArgumentException e) { + } + + try { + // Wrong matching + Location tester3 = new Location(invalidList5); + fail(); + } catch (IllegalArgumentException e) { + } + } + + public void testCreateLocationFromJSON() throws Exception { + // Preparing for the JSON constructor + JSONString JSONLocNull = new JSONString("n"); + + JSONString JSONstripingPolicy = this.stripingPolicy.asJSONString(); + List osds = new ArrayList(); + String osdList = JSONParser.writeJSON(osds); + + JSONString JSONemptyOSDList = new JSONString("[" + + JSONstripingPolicy.asString() + "," + osdList + "]"); + + for (ServiceUUID osd : this.osdList) { + osds.add(osd.toString()); + } + osdList = JSONParser.writeJSON(osds); + JSONString JSONLocRight = new JSONString("[" + + JSONstripingPolicy.asString() + "," + osdList + "]"); + + try { + // Null JSON String + new Location(JSONLocNull); + fail(); + } catch (IllegalArgumentException e) { + } + + try { + // Empty osdList + new Location(JSONemptyOSDList); + fail(); + } catch (IllegalArgumentException e) { + } + + try { + // Right tests + Location fromJSONLoc = new Location(JSONLocRight); + new Location(new JSONString(JSONParser.writeJSON(fromJSONLoc + .asList()))); + } catch (Exception e) { + fail(); + } + + Location tested = new Location( + new JSONString( + "[{\"width\":1,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/127.0.0.2:32637\"]]")); + + StripingPolicy sp = tested.getStripingPolicy(); + List osds2 = tested.getOSDs(); + + assertEquals(new RAID0(1, 1), sp); + assertEquals(osds2.get(0), new ServiceUUID("http://127.0.0.2:32637")); + assertEquals(1, osds2.size()); + } + + /** + * 
It tests the getStripingPolicy method + */ + public void testGetStripingPolicy() throws Exception { + // Preparing + final int numberOfOSDs = 1; + final StripingPolicy sp = new RAID0(1, numberOfOSDs); + final StripingPolicy sp2 = new RAID0(1, numberOfOSDs); + final List osds = new ArrayList(numberOfOSDs); + osds.add(new ServiceUUID("http://www.google.com:80")); + final Location loc = new Location(sp, osds); + + // Test + StripingPolicy answer = loc.getStripingPolicy(); + + // Checking + assertEquals(sp, answer); + assertEquals(sp2, answer); + } + + /** + * It tests the getOSDs method + */ + public void testGetOSDs() throws Exception { + + // Preparing + final int numberOfOSDs = 3; + final StripingPolicy sp = new RAID0(1, numberOfOSDs); + final List osds = new ArrayList(numberOfOSDs); + osds.add(new ServiceUUID("http://www.google.com:80")); + osds.add(new ServiceUUID("http://www.yahoo.com:80")); + osds.add(new ServiceUUID("http://www.ozu.com:80")); + final Location loc = new Location(sp, osds); + + final List osds2 = new ArrayList(numberOfOSDs); + osds2.add(new ServiceUUID("http://www.google.com:80")); + osds2.add(new ServiceUUID("http://www.yahoo.com:80")); + osds2.add(new ServiceUUID("http://www.ozu.com:80")); + + // Test + List answer = loc.getOSDs(); + + // Checking + assertEquals(osds, answer); + assertEquals(osds2, answer); + } + + /** + * It tests the asList method + */ + public void testAsList() throws Exception { + // Preparing + final int numberOfOSDs = 3; + final StripingPolicy sp = new RAID0(1, numberOfOSDs); + final List osds = new ArrayList(numberOfOSDs); + osds.add(new ServiceUUID("http://www.google.com:80")); + osds.add(new ServiceUUID("http://www.yahoo.com:80")); + osds.add(new ServiceUUID("http://www.ozu.com:80")); + final Location loc = new Location(sp, osds); + + // Test + List answer = loc.asList(); + + // Checking + Map mappedSP = (Map) answer.get(0); + List listedOSDs = (List) answer.get(1); + + assertEquals(sp, 
StripingPolicy.readFromJSON(mappedSP)); + assertEquals(numberOfOSDs, listedOSDs.size()); + for (int i = 0; i < osds.size(); i++) { + assertEquals(osds.get(i).toString(), listedOSDs.get(i)); + } + } + + public void testGetOSDByX() throws Exception { + Location loc = new Location(this.stripingPolicy, this.osdList); + + assertEquals(this.osdList.get(this.stripingPolicy.getOSDByObject(0)), + loc.getOSDByObject(0)); + assertEquals(this.osdList.get(this.stripingPolicy.getOSDByObject(1)), + loc.getOSDByObject(1)); + assertEquals(this.osdList.get(this.stripingPolicy.getOSDByObject(85)), + loc.getOSDByObject(85)); + + final long KB = 1024; + assertEquals(this.osdList.get(this.stripingPolicy.getOSDByOffset(0)), + loc.getOSDByOffset(0)); + assertEquals(this.osdList.get(this.stripingPolicy.getOSDByOffset(20)), + loc.getOSDByOffset(20)); + assertEquals(this.osdList.get(this.stripingPolicy + .getOSDByOffset(1 * KB)), loc.getOSDByOffset(1 * KB)); + assertEquals(this.osdList.get(this.stripingPolicy + .getOSDByOffset(85 * KB)), loc.getOSDByOffset(85 * KB)); + + // one OSD + assertEquals(this.osdList.get(this.stripingPolicy.getOSDByOffset(20)), + loc.getOSDByByteRange(20, 80)); + assertEquals( + this.osdList.get(this.stripingPolicy.getOSDByOffset(1022)), loc + .getOSDByByteRange(1022, 1023)); + assertEquals( + this.osdList.get(this.stripingPolicy.getOSDByOffset(1024)), loc + .getOSDByByteRange(1024, 2047)); + + // multiple osds + assertEquals(null, loc.getOSDByByteRange(1020, 1055)); + } + + public void testContainsOSD() throws Exception { + Location loc = new Location(this.stripingPolicy, this.osdList); + assertEquals(true, loc.containsOSD(this.osdList.get(1))); + assertEquals(false, loc.containsOSD(new ServiceUUID("bla"))); + } + + public static void main(String[] args) { + TestRunner.run(LocationTest.class); + } +} diff --git a/servers/test/org/xtreemfs/test/common/striping/LocationsCacheTest.java b/servers/test/org/xtreemfs/test/common/striping/LocationsCacheTest.java new file 
mode 100644 index 0000000000000000000000000000000000000000..943c2f7e685af0dc4f8a544f6f43ac7326956d84 --- /dev/null +++ b/servers/test/org/xtreemfs/test/common/striping/LocationsCacheTest.java @@ -0,0 +1,143 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion and + Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), + * Eugenio Cesario (CNR) + */ + +package org.xtreemfs.test.common.striping; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.osd.LocationsCache; + +/** + * This class implements the tests for LocationsCache + * + * @author jmalo + */ +public class LocationsCacheTest extends TestCase { + private LocationsCache cache; + private final int maximumSize = 3; + + /** Creates a new instance of LocationsCacheTest */ + public LocationsCacheTest(String testName) { + super(testName); + } + + protected void setUp() throws Exception { + System.out.println("TEST: " + getClass().getSimpleName() + "." + + getName()); + cache = new LocationsCache(maximumSize); + } + + protected void tearDown() throws Exception { + cache = null; + } + + /** + * It tests the update method + */ + public void testUpdate() throws Exception { + + Locations loc = new Locations(new JSONString("[[], 1]")); + + for (int i = 0; i < 3 * maximumSize; i++) { + cache.update("F" + i, loc); + } + + for (int i = 0; i < 2 * maximumSize; i++) { + assertNull(cache.getLocations("F" + i)); + assertEquals(0, cache.getVersion("F" + i)); + } + + for (int i = 2 * maximumSize; i < 3 * maximumSize; i++) { + assertNotNull(cache.getLocations("F" + i)); + assertEquals(loc.getVersion(), cache.getVersion("F" + i)); + } + } + + /** + * It tests the getVersion method + */ + public void testGetVersion() throws Exception { + + Locations loc0 = new Locations(new JSONString("[[], 1]")); + Locations loc1 = new Locations(new JSONString("[[], 2]")); + String fileId = "F0"; + + // It asks the version number of an inexistent entry + assertEquals(0, cache.getVersion(fileId)); + + // It asks the version number of a new added entry + cache.update(fileId, loc0); + assertEquals(loc0.getVersion(), cache.getVersion(fileId)); + + // 
It asks the version number of an updated entry + cache.update(fileId, loc1); + assertEquals(loc1.getVersion(), cache.getVersion(fileId)); + } + + /** + * It tests the getLocations method + */ + public void testGetLocations() throws Exception { + + Locations loc = new Locations(new JSONString("[[], 1]")); + + // It fills the cache + for (int i = 0; i < maximumSize; i++) { + cache.update("F" + i, loc); + } + + // Checks the whole cache + for (int i = 0; i < maximumSize; i++) { + Locations loc2 = cache.getLocations("F" + i); + + assertNotNull(loc2); + assertEquals(loc, loc2); + } + + // Removes an entry and adds a new one + { + cache.update("F" + maximumSize, loc); + + Locations loc2 = cache.getLocations("F" + 0); + assertNull(loc2); + + for (int i = 1; i <= maximumSize; i++) { + loc2 = cache.getLocations("F" + i); + + assertNotNull(loc2); + assertEquals(loc, loc2); + } + } + } + + public static void main(String[] args) { + TestRunner.run(LocationsCacheTest.class); + } +} diff --git a/servers/test/org/xtreemfs/test/common/striping/LocationsTest.java b/servers/test/org/xtreemfs/test/common/striping/LocationsTest.java new file mode 100644 index 0000000000000000000000000000000000000000..b7e7a77f6fafdbd6fc306c4180daf876e1ad7d7c --- /dev/null +++ b/servers/test/org/xtreemfs/test/common/striping/LocationsTest.java @@ -0,0 +1,245 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion and + Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), + * Eugenio Cesario (CNR) + */ + +package org.xtreemfs.test.common.striping; + +import java.util.ArrayList; +import java.util.List; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.test.SetupUtils; + +/** + * This class implements the tests for Locations + * + * @author jmalo + */ +public class LocationsTest extends TestCase { + List osds = new ArrayList(); + + /** + * Creates a new instance of LocationsTest + */ + public LocationsTest(String testName) { + super(testName); + Logging.start(SetupUtils.DEBUG_LEVEL); + + osds.add(new ServiceUUID("http://127.0.0.1:65535")); + osds.add(new ServiceUUID("http://192.168.0.1:65535")); + osds.add(new ServiceUUID("http://172.16.0.1:65535")); + osds.add(new ServiceUUID("http://10.0.0.1:65535")); + } + + protected void setUp() throws Exception { + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ + getName()); + } + + protected void tearDown() throws Exception { + } + + /** + * It tests the constructor from strings + */ + public void testFromString() throws Exception { + Locations loc; + + // Right use cases + { + loc = new Locations( + new JSONString( + "[[[{\"width\":1,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/127.0.0.1:65535\"]]],1,\"sync\"]")); + assertNotNull(loc.getLocation(osds.get(0))); + assertNull(loc.getLocation(osds.get(1))); + + assertEquals(1, loc.getNumberOfReplicas()); + assertEquals(1, loc.getVersion()); + assertEquals(Locations.REPLICA_UPDATE_POLICY_SYNC, loc + .getReplicaUpdatePolicy()); + } + + { + loc = new Locations( + new JSONString( + "[[[{\"width\":2,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/127.0.0.1:65535\", \"http:\\/\\/192.168.0.1:65535\"]]], 1,\"lazy\"]")); + assertNotNull(loc.getLocation(osds.get(0))); + assertNotNull(loc.getLocation(osds.get(1))); + assertNull(loc.getLocation(osds.get(2))); + assertEquals(loc.getLocation(osds.get(0)), loc.getLocation(osds + .get(1))); + + assertEquals(1, loc.getNumberOfReplicas()); + assertEquals(1, loc.getVersion()); + assertEquals(Locations.REPLICA_UPDATE_POLICY_ONDEMAND, loc + .getReplicaUpdatePolicy()); + } + + { + loc = new Locations( + new JSONString( + "[[[{\"width\":1,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/127.0.0.1:65535\"]], [{\"width\":1,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/172.16.0.1:65535\"]]], 1]")); + assertNotNull(loc.getLocation(osds.get(0))); + assertNull(loc.getLocation(osds.get(1))); + assertNotNull(loc.getLocation(osds.get(2))); + assertNull(loc.getLocation(osds.get(3))); + assertFalse(loc.getLocation(osds.get(0)).equals( + loc.getLocation(osds.get(2)))); + + assertEquals(2, loc.getNumberOfReplicas()); + assertEquals(1, loc.getVersion()); + assertEquals(Locations.REPLICA_UPDATE_POLICY_ONDEMAND, loc + .getReplicaUpdatePolicy()); + } + + { + loc = new Locations( + new JSONString( + 
"[[[{\"width\":2,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/127.0.0.1:65535\", \"http:\\/\\/192.168.0.1:65535\"]], [{\"width\":2,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/172.16.0.1:65535\", \"http:\\/\\/10.0.0.1:65535\"]]], 1,\"lazy\"]")); + assertNotNull(loc.getLocation(osds.get(0))); + assertNotNull(loc.getLocation(osds.get(1))); + assertNotNull(loc.getLocation(osds.get(2))); + assertNotNull(loc.getLocation(osds.get(3))); + assertEquals(loc.getLocation(osds.get(0)), loc.getLocation(osds + .get(1))); + assertEquals(loc.getLocation(osds.get(2)), loc.getLocation(osds + .get(3))); + assertFalse(loc.getLocation(osds.get(0)).equals( + loc.getLocation(osds.get(2)))); + + assertEquals(2, loc.getNumberOfReplicas()); + assertEquals(1, loc.getVersion()); + assertEquals(Locations.REPLICA_UPDATE_POLICY_ONDEMAND, loc + .getReplicaUpdatePolicy()); + } + + { + loc = new Locations(new JSONString("[[], 1]")); + + assertEquals(0, loc.getNumberOfReplicas()); + assertEquals(1, loc.getVersion()); + } + + { + loc = new Locations( + new JSONString( + "[[[{\"width\":1,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/127.0.0.1:65535\"]], [{\"width\":1,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/172.16.0.1:65535\"]]], 1,\"sync:1\"]")); + assertEquals(1, loc.getReplicaSyncLevel()); + } + + { + loc = new Locations( + new JSONString( + "[[[{\"width\":1,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/127.0.0.1:65535\"]]],1,\"sync:2\"]")); + assertEquals(Locations.REPLICA_UPDATE_POLICY_SYNC, loc + .getReplicaUpdatePolicy()); + assertEquals(1, loc.getReplicaSyncLevel()); + } + + { + loc = new Locations( + new JSONString( + "[[[{\"width\":1,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/127.0.0.1:65535\"]]],1,\"sync:0\"]")); + assertEquals(Locations.REPLICA_UPDATE_POLICY_SYNC, loc + .getReplicaUpdatePolicy()); + assertEquals(0, loc.getReplicaSyncLevel()); + } + + // Wrong use cases + try { + new Locations(new JSONString("")); + fail(); + } catch 
(JSONException ex) { + } + + try { + new Locations(new JSONString("[]")); + fail(); + } catch (Exception ex) { + } + + try { + new Locations(new JSONString("[[]]")); + fail(); + } catch (Exception ex) { + } + + try { + new Locations(new JSONString("[[],]")); + fail(); + } catch (Exception ex) { + } + + try { + new Locations(new JSONString("[0]")); + fail(); + } catch (Exception ex) { + } + + try { + new Locations(new JSONString("[, 0]")); + fail(); + } catch (Exception ex) { + } + + try { + new Locations(new JSONString("[0, 0]")); + fail(); + } catch (Exception ex) { + } + + try { + new Locations(new JSONString("[0, []]")); + fail(); + } catch (Exception ex) { + } + + // tests, if we don't allow more than specified + /* + * try { new Locations(newJSONString( + * "[[[{\"width\":1,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/127.0.0.1:65535\"]], [{\"width\":1,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/172.16.0.1:65535\"]]], 1,\"lazy:1\"]" + * )); fail(); } catch(Exception ex) { } + */ + } + + public void testAsJSONString() throws Exception { + JSONString expected = new JSONString( + "[[[{\"width\":2,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/127.0.0.1:65535\",\"http:\\/\\/192.168.0.1:65535\"]],[{\"width\":2,\"policy\":\"RAID0\",\"stripe-size\":1},[\"http:\\/\\/172.16.0.1:65535\",\"http:\\/\\/10.0.0.1:65535\"]]],1,\"sync:1\"]"); + assertEquals(expected.asString(), new Locations(expected) + .asJSONString().asString()); + } + + public static void main(String[] args) { + TestRunner.run(LocationsTest.class); + } +} diff --git a/servers/test/org/xtreemfs/test/common/striping/RAID0Test.java b/servers/test/org/xtreemfs/test/common/striping/RAID0Test.java new file mode 100644 index 0000000000000000000000000000000000000000..d6815c05d2347a895d82f0752b40e7d091d16d89 --- /dev/null +++ b/servers/test/org/xtreemfs/test/common/striping/RAID0Test.java @@ -0,0 +1,234 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + 
Barcelona Supercomputing Center - Centro Nacional de Supercomputacion and + Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Christian Lorenz (ZIB), Jan Stender (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), + * Eugenio Cesario (CNR) + */ + +package org.xtreemfs.test.common.striping; + +import java.util.List; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.RAID0; +import org.xtreemfs.common.striping.StripeInfo; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.test.SetupUtils; + +/** + * It tests the RAID0 class + * + * @author clorenz + */ +public class RAID0Test extends TestCase { + private static final long KILOBYTE = 1024L; + + /** Creates a new instance of RAID0Test */ + public RAID0Test(String testName) { + super(testName); + Logging.start(SetupUtils.DEBUG_LEVEL); + } + + protected void setUp() throws Exception { + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ + getName()); + } + + protected void tearDown() throws Exception { + + } + + /** + * It tests the creation of RAID0 objects + */ + public void testRAID0ObjectCreation() throws Exception { + + RAID0 wrong; + + try { + // Bad size + wrong = new RAID0(0, 1); + + fail(); + } catch (IllegalArgumentException e) { + } + + try { + // Bad number of OSDs + wrong = new RAID0(1, 0); + + fail(); + } catch (IllegalArgumentException e) { + } + + try { + // Bad size and number of OSDs + wrong = new RAID0(-1, 0); + + fail(); + } catch (IllegalArgumentException e) { + } + + RAID0 right = new RAID0(1, 1); + } + + public void testGetObjectsAndBytes() throws Exception { + RAID0 policy = new RAID0(128, 3); // 128 kB per stripe and 3 OSDs + long objectID, offset; + + objectID = policy.getObject(20); + assertEquals(0, objectID); + + objectID = policy.getObject(20 * KILOBYTE); + assertEquals(0, objectID); + + objectID = policy.getObject(255 * KILOBYTE); + assertEquals(1, objectID); + + objectID = policy.getObject(256 * KILOBYTE); + assertEquals(2, objectID); + + offset = policy.getFirstByte(5); + assertEquals(640 * KILOBYTE, offset); + + offset = policy.getLastByte(5); + assertEquals(768 * KILOBYTE - 1, offset); + + offset = policy.getFirstByte(6); + assertEquals(768 * KILOBYTE, offset); + } + + public void testGetOSDs() throws Exception { + RAID0 policy = new RAID0(128, 8); // 128 kB per stripe and 8 OSDs + + int osd0 = policy.getOSDByObject(0); + assertEquals(0, osd0); + + int osd1 = policy.getOSDByObject(1); + assertEquals(1, osd1); + + int osd7 = policy.getOSDByObject(7); + assertEquals(7, osd7); + + int osd8 = policy.getOSDByObject(8); + assertEquals(0, osd8); + + int osd21 = policy.getOSDByObject(2125648682); + assertEquals(2, osd21); + + int osd0b = policy.getOSDByOffset(20); + assertEquals(osd0, osd0b); + + int osd0c = policy.getOSDByOffset(20 * KILOBYTE); + assertEquals(0, osd0c); + + int osd7b = policy.getOSDByOffset(7 * 128 * KILOBYTE); + assertEquals(osd7, osd7b); + + int 
osd8b = policy.getOSDByOffset(8 * 128 * KILOBYTE); + assertEquals(osd8, osd8b); + + int osd21b = policy.getOSDByOffset(2125648682 * 128 * KILOBYTE); + assertEquals(osd21, osd21b); + } + + public void testGetObjectsByRange() throws Exception { + RAID0 policy = new RAID0(10, 3); // 10 kB per stripe and 3 OSDs + List result; + StripeInfo expectedStart; + StripeInfo expectedEnd; + + // one object, byte-range < stripe size + result = policy.getObjects(0, 9 * KILOBYTE); + expectedStart = new StripeInfo(0, 0, 0, 9 * KILOBYTE); + // expected only one element in list + assertEquals(1, result.size()); + assertEquals(expectedStart, result.remove(0)); + + // more objects, byte-range < stripe size + result = policy.getObjects(5 * KILOBYTE, 14 * KILOBYTE); + expectedStart = new StripeInfo(0, 0, 5 * KILOBYTE, 10 * KILOBYTE - 1); + expectedEnd = new StripeInfo(1, 1, 0, 4 * KILOBYTE); + // expected two elements in list + assertEquals(2, result.size()); + assertEquals(expectedStart, result.remove(0)); + assertEquals(expectedEnd, result.remove(0)); + + // more objects, byte-range > stripe size (simple) + result = policy.getObjects(0, 29 * KILOBYTE); + expectedStart = new StripeInfo(0, 0, 0, 10 * KILOBYTE - 1); + expectedEnd = new StripeInfo(2, 2, 0, 9 * KILOBYTE); + assertEquals(expectedStart, result.remove(0)); + assertEquals(expectedEnd, result.remove(0)); + + // more objects, byte-range > stripe size + result = policy.getObjects(8 * KILOBYTE, 54 * KILOBYTE); + expectedStart = new StripeInfo(0, 0, 8 * KILOBYTE, 10 * KILOBYTE - 1); + expectedEnd = new StripeInfo(5, 2, 0, 4 * KILOBYTE); + assertEquals(expectedStart, result.remove(0)); + assertEquals(expectedEnd, result.remove(0)); + } + + public void testGetStripeSize() throws Exception { + RAID0 policy = new RAID0(256, 3); // 256 kB per stripe and 3 OSDs + assertEquals(256 * KILOBYTE, policy.getStripeSize(5)); + } + + public void testCalculateLastObject() throws Exception { + RAID0 policy = new RAID0(256, 3); // 256 kB per stripe 
and 3 OSDs + assertEquals(41, policy.calculateLastObject(256L * KILOBYTE * 42)); // filesize + // = + // offset + // + + // 1 + assertEquals(42, policy + .calculateLastObject(256L * KILOBYTE * 42 + 32000)); + assertEquals(42, policy.calculateLastObject(256L * KILOBYTE * 43 - 1)); + } + + /** + * It tests the export method + */ + public void testAsJSONString() throws Exception { + RAID0 subject = new RAID0(7, 3); // 7 kB per stripe and 3 OSDs + assertEquals(new JSONString( + "{\"width\":3,\"policy\":\"RAID0\",\"stripe-size\":7}"), + subject.asJSONString()); + } + + /** + * It tests the parsing and unparsing of the striping policies + */ + public void testJSONParsing() throws Exception { + // RAID0 parsing and unparsing + RAID0 unparsedRaid0 = new RAID0(1, 1); + StripingPolicy parsedRaid0 = StripingPolicy.readFromJSON(unparsedRaid0 + .asJSONString()); + + assertEquals(unparsedRaid0.asJSONString(), parsedRaid0.asJSONString()); + } +} diff --git a/servers/test/org/xtreemfs/test/common/uuid/UUIDResolverTest.java b/servers/test/org/xtreemfs/test/common/uuid/UUIDResolverTest.java new file mode 100644 index 0000000000000000000000000000000000000000..f4d7255d088fe69c6b0ce2385e84544a2470f2c0 --- /dev/null +++ b/servers/test/org/xtreemfs/test/common/uuid/UUIDResolverTest.java @@ -0,0 +1,114 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. 
+ */ + +package org.xtreemfs.test.common.uuid; + +import java.io.File; +import java.net.InetSocketAddress; +import java.util.List; +import java.util.Map; +import junit.framework.TestCase; +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.common.util.NetUtils; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.common.uuids.UUIDResolver; +import org.xtreemfs.common.uuids.UnknownUUIDException; +import org.xtreemfs.dir.DIRConfig; +import org.xtreemfs.dir.RequestController; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.test.SetupUtils; + +/** + * + * @author bjko + */ +public class UUIDResolverTest extends TestCase { + + private final String nullAuth; + + private RequestController dirCtrl = null; + + private DIRClient dc; + + private InetSocketAddress localhost; + + private final int TIMEOUT = 10000; + + + public UUIDResolverTest(String testName) throws JSONException { + super(testName); + nullAuth = NullAuthProvider.createAuthString("bla", "blub"); + } + + @Override + protected void setUp() throws Exception { + super.setUp(); + + Logging.start(Logging.LEVEL_DEBUG); + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + + // cleanup + File testDir = new File(SetupUtils.TEST_DIR); + FSUtils.delTree(testDir); + testDir.mkdirs(); + + DIRConfig conf = SetupUtils.createDIRConfig(); + + localhost = new InetSocketAddress("localhost", conf.getPort()); + dc = SetupUtils.createDIRClient(60000); + + dirCtrl = new RequestController(conf); + dirCtrl.startup(); + + TimeSync.initialize(dc, 1000000, 50, nullAuth); + + UUIDResolver.shutdown(); + UUIDResolver.start(dc,100,100); + UUIDResolver.addLocalMapping("localhost", 32636, false); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + dirCtrl.shutdown(); + dc.shutdown(); + UUIDResolver.shutdown(); + } + + public void testSimpleMapping() throws Exception { + List> mpgs = NetUtils.getReachableEndpoints(32636, "http"); + RPCResponse r = dc.registerAddressMapping("MY_TEST_UUID", mpgs, 1l , nullAuth); + r.waitForResponse(); + ServiceUUID uuid = new ServiceUUID("MY_TEST_UUID"); + uuid.resolve(); + System.out.println(uuid); + System.out.println(uuid); + + try { + ServiceUUID uuid2 = new ServiceUUID("YAGGA YAGGA"); + uuid2.getAddress(); + fail("returned result for unknown address mapping"); + } catch (UnknownUUIDException ex) { + //supi + } + + Thread.sleep(200); + + uuid = new ServiceUUID("MY_TEST_UUID"); + uuid.resolve(); + System.out.println(uuid); + + uuid = new ServiceUUID("localhost"); + uuid.resolve(); + System.out.println(uuid); + } + + +} diff --git a/servers/test/org/xtreemfs/test/dir/DIRTest.java b/servers/test/org/xtreemfs/test/dir/DIRTest.java new file mode 100644 index 0000000000000000000000000000000000000000..080c7772d3b2b08f4cdfdcd9433979d017e0cef1 --- /dev/null +++ b/servers/test/org/xtreemfs/test/dir/DIRTest.java @@ -0,0 +1,214 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. 
+ The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.dir; + +import java.io.File; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import junit.framework.*; +import junit.textui.TestRunner; + +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.HttpErrorException; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.dir.DIRConfig; +import org.xtreemfs.dir.RequestController; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.test.SetupUtils; + +/** + * + * @author bjko + */ +public class DIRTest extends TestCase { + + private final String nullAuth; + + private RequestController dirCtrl = null; + + private DIRClient dc; + + private InetSocketAddress localhost; + + private final int TIMEOUT = 10000; + + public DIRTest(String testName) throws Exception { + super(testName); + nullAuth = NullAuthProvider.createAuthString("bla", "blub"); + } + + protected void setUp() throws Exception { + + 
Logging.start(Logging.LEVEL_DEBUG); + System.out.println("TEST: " + getClass().getSimpleName() + "." + getName()); + + // cleanup + File testDir = new File(SetupUtils.TEST_DIR); + FSUtils.delTree(testDir); + testDir.mkdirs(); + + DIRConfig conf = SetupUtils.createDIRConfig(); + + localhost = new InetSocketAddress("localhost", conf.getPort()); + dc = SetupUtils.createDIRClient(60000); + + dirCtrl = new RequestController(conf); + dirCtrl.startup(); + + } + + protected void tearDown() throws Exception { + dc.shutdown(); + dirCtrl.shutdown(); + dc.waitForShutdown(); + } + + public void testRegisterAndQuery() throws Exception { + + Map attrs = new HashMap(); + + attrs.put("type", "OSD"); + try { + RPCResponse resp = dc.registerEntity("test1", attrs, 1, nullAuth); + long v = resp.get(); + resp.freeBuffers(); + assertTrue(v > 0); + } catch (HttpErrorException ex) { + ex.printStackTrace(); + if (ex.getStatusCode() == HTTPUtils.SC_USER_EXCEPTION) { + throw ex; + } + } + + attrs.clear(); + attrs.put("type", "MRC"); + try { + RPCResponse resp = dc.registerEntity("test2", attrs, 1, nullAuth); + long v = resp.get(); + resp.freeBuffers(); + assertTrue(v > 1); + } catch (HttpErrorException ex) { + ex.printStackTrace(); + if (ex.getStatusCode() == HTTPUtils.SC_USER_EXCEPTION) { + throw ex; + } + } + + Map query = new HashMap(); + query.put("type", "MRC"); + + List attrs2 = new ArrayList(2); + attrs2.add("version"); + attrs2.add("type"); + + RPCResponse>> resp = dc + .getEntities(query, attrs2, nullAuth); + Map> rv = resp.get(); + resp.freeBuffers(); + assertNotNull(rv); + + assertEquals(rv.size(), 1); + assertNotNull(rv.get("test2")); + + resp = dc.deregisterEntity("test2", nullAuth); + resp.waitForResponse(); + resp.freeBuffers(); + + query.clear(); + resp = dc.getEntities(query, attrs2, nullAuth); + rv = resp.get(); + resp.freeBuffers(); + assertNotNull(rv); + + assertEquals(rv.size(), 1); + assertNotNull(rv.get("test1")); + assertNull(rv.get("test2")); + } + + public void 
testReRegister() throws Exception { + + Map attrs = new HashMap(); + long v = 0; + + // register an entity + attrs.put("type", "OSD"); + try { + RPCResponse resp = dc.registerEntity("test", attrs, 1, nullAuth); + v = resp.get(); + resp.freeBuffers(); + assertTrue(v > 0); + } catch (HttpErrorException ex) { + ex.printStackTrace(); + if (ex.getStatusCode() == HTTPUtils.SC_USER_EXCEPTION) { + throw ex; + } + } + + // try to register an entity with wrong version number; this should fail + RPCResponse r1 = null; + try { + r1 = dc.registerEntity("test", attrs, 0, nullAuth); + r1.waitForResponse(); + fail("registration with wrong version number successful!"); + } catch (HttpErrorException ex) { + r1.freeBuffers(); + } + + // update a registered entity + try { + RPCResponse r2 = dc.registerEntity("test", attrs, v, nullAuth); + long v2 = r2.get(); + r2.freeBuffers(); + assertFalse(v == v2); + + } catch (HttpErrorException ex) { + } + + Map query = new HashMap(); + query.put("type", "OSD"); + + RPCResponse>> resp = dc.getEntities(query, null, nullAuth); + Map> rv = resp.get(); + resp.freeBuffers(); + assertNotNull(rv); + + assertEquals(1, rv.size()); + + Map map = rv.get("test"); + assertNotNull(map); + assertEquals(5, map.size()); + } + + public static void main(String[] args) { + TestRunner.run(DIRTest.class); + } + +} diff --git a/servers/test/org/xtreemfs/test/foundation/DigestAuthTest.java b/servers/test/org/xtreemfs/test/foundation/DigestAuthTest.java new file mode 100644 index 0000000000000000000000000000000000000000..db35e04a0e90ae67f0af5e98d2e6380eccdafbb7 --- /dev/null +++ b/servers/test/org/xtreemfs/test/foundation/DigestAuthTest.java @@ -0,0 +1,86 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. 
+ */ + +package org.xtreemfs.test.foundation; + +import java.net.InetSocketAddress; +import junit.framework.TestCase; +import org.xtreemfs.common.clients.HttpErrorException; +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.pinky.PinkyRequestListener; +import org.xtreemfs.foundation.pinky.PipelinedPinky; +import org.xtreemfs.foundation.speedy.MultiSpeedy; + +/** + * + * @author bjko + */ +public class DigestAuthTest extends TestCase { + + private PipelinedPinky pinky; + private MultiSpeedy speedy; + + private InetSocketAddress me; + + public DigestAuthTest(String testName) { + super(testName); + me = new InetSocketAddress("localhost",12121); + Logging.start(Logging.LEVEL_DEBUG); + } + + @Override + protected void setUp() throws Exception { + super.setUp(); + pinky = new PipelinedPinky(12121, null, new PinkyRequestListener() { + + public void receiveRequest(PinkyRequest theRequest) { + if (theRequest.requestAuthentication("test", "test")) { + theRequest.setResponse(200); + System.out.println("got authenticated request..."); + } + pinky.sendResponse(theRequest); + } + }); + pinky.start(); + + speedy = new MultiSpeedy(); + speedy.start(); + + pinky.waitForStartup(); + speedy.waitForStartup(); + + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + pinky.shutdown(); + pinky.waitForShutdown(); + + speedy.shutdown(); + speedy.waitForShutdown(); + } + + /*public void testAuth() throws Exception { + RPCClient c = new RPCClient(speedy); + RPCResponse r = null; + try { + r = c.sendRPC(me, "", null, "yagga yagag", null); + r.waitForResponse(); + } catch (HttpErrorException ex) { + if (ex.authenticationRequest()) { + HTTPHeaders addHdr = new HTTPHeaders(); + addHdr.addHeader(HTTPHeaders.HDR_AUTHORIZATION, 
RPCClient.createAuthResponseHeader(r.getSpeedyRequest(),"test","test")); + r = c.sendRPC(me, "", null, "", addHdr); + r.waitForResponse(); + } + } + }*/ + +} diff --git a/servers/test/org/xtreemfs/test/foundation/PinkyTest.java b/servers/test/org/xtreemfs/test/foundation/PinkyTest.java new file mode 100644 index 0000000000000000000000000000000000000000..f806c4baefb4af8b2f685ed3107ff0e20e91f488 --- /dev/null +++ b/servers/test/org/xtreemfs/test/foundation/PinkyTest.java @@ -0,0 +1,195 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.test.foundation; + +import junit.framework.*; +import org.apache.commons.httpclient.HttpClient; +import org.apache.commons.httpclient.methods.GetMethod; +import org.apache.commons.httpclient.methods.PostMethod; +import org.apache.commons.httpclient.methods.StringRequestEntity; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.pinky.PipelinedPinky; +import org.xtreemfs.foundation.pinky.PinkyRequestListener; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.test.SetupUtils; + +/** + * + * @author bjko + */ +public class PinkyTest extends TestCase { + + Thread test; + + PipelinedPinky sthr; + + public static final int PORT = 12345; + + private static final String URL = "http://localhost:"+PORT+"/"; + + HttpClient client; + + public PinkyTest(String testName) { + super(testName); + Logging.start(SetupUtils.DEBUG_LEVEL); + } + + protected void setUp() throws Exception { + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + + client = new HttpClient(); + + // create a new Pinky server + sthr = new PipelinedPinky(PORT, null, null); + + // register a request listener that is called by pinky when + // receiving a request + sthr.registerListener(new PinkyRequestListener() { + public void receiveRequest(PinkyRequest theRequest) { + try { + // unpack body, parse it, write back JSON and send that + // back to the client + if (theRequest.requestBody != null) { + byte bdy[] = null; + if (theRequest.requestBody.hasArray()) { + bdy = theRequest.requestBody.array(); + } else { + bdy = new byte[theRequest.requestBody + .capacity()]; + theRequest.requestBody.position(0); + theRequest.requestBody.get(bdy); + } + + String body = new String(bdy, "utf-8"); + Object o = JSONParser + .parseJSON(new JSONString(body)); + String respBdy = JSONParser.writeJSON(o); + theRequest.setResponse(HTTPUtils.SC_OKAY, + ReusableBuffer.wrap(respBdy.getBytes("utf-8")), + HTTPUtils.DATA_TYPE.JSON); + } else { + theRequest.setResponse(HTTPUtils.SC_OKAY); + } + sthr.sendResponse(theRequest); + } catch (Exception ex) { + ex.printStackTrace(); + try { + theRequest.setResponse(HTTPUtils.SC_SERVER_ERROR); + } catch (Exception e) { + // ignore that + e.printStackTrace(); + } + theRequest.setClose(true); + sthr.sendResponse(theRequest); + } + } + }); + // start the Pinky server in a new thread +// test = new Thread(sthr); +// test.start(); + sthr.start(); + } + + protected void tearDown() throws Exception { + sthr.shutdown(); + sthr.waitForShutdown(); +// synchronized (this) { +// this.wait(2000); +// } + + } + + // TODO add test methods here. The name must begin with 'test'. 
For example: + // public void testHello() {} + + public void testJSONEcho() throws Exception { + + PostMethod method = new PostMethod(URL); + String content = "[\"Hallo\"]"; + method.setRequestEntity(new StringRequestEntity(content,"text/plain","utf-8")); + int rc = client.executeMethod(method); + assertEquals(rc,200); + String response = method.getResponseBodyAsString(); + assertEquals(response,content); + + } + + public void testEmptyJSON() throws Exception { + + PostMethod method = new PostMethod(URL); + String content = ""; + method.setRequestEntity(new StringRequestEntity(content,"text/plain","utf-8")); + int rc = client.executeMethod(method); + assertEquals(rc,200); + String response = method.getResponseBodyAsString(); + assertEquals(response,content); + + } + + public void testEmptyBody() throws Exception { + + PostMethod method = new PostMethod(URL); + int rc = client.executeMethod(method); + assertEquals(rc,200); + String response = method.getResponseBodyAsString(); + assertEquals(response.length(),0); + + } + + public void testGet() throws Exception { + + GetMethod method = new GetMethod(URL); + int rc = client.executeMethod(method); + assertEquals(rc,200); + String response = method.getResponseBodyAsString(); + assertEquals(response.length(),0); + + } + + public void testMultipleJSONEcho() throws Exception { + String content = "[\"Hallo\"]"; + StringRequestEntity request = new StringRequestEntity(content,"text/plain","utf-8"); + + int NUMBER_OF_CLIENTS = 100; + int rc; + + for(int i=0; i < NUMBER_OF_CLIENTS; i++){ + PostMethod method = new PostMethod(URL); + method.setRequestEntity(request); + + client = new HttpClient(); + rc = client.executeMethod(method); + + assertEquals(rc,200); + String response = method.getResponseBodyAsString(); + assertEquals(response,content); + } + } + +} diff --git a/servers/test/org/xtreemfs/test/foundation/SpeedyPinkyTest.java b/servers/test/org/xtreemfs/test/foundation/SpeedyPinkyTest.java new file mode 100644 index 
0000000000000000000000000000000000000000..da7f6238de1e9a09fc8a997cadaed4b5c8dffee6 --- /dev/null +++ b/servers/test/org/xtreemfs/test/foundation/SpeedyPinkyTest.java @@ -0,0 +1,229 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Christian Lorenz (ZIB) + */ +package org.xtreemfs.test.foundation; + +import java.net.InetSocketAddress; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.pinky.PinkyRequestListener; +import org.xtreemfs.foundation.pinky.PipelinedPinky; +import org.xtreemfs.foundation.pinky.HTTPUtils.DATA_TYPE; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.foundation.speedy.SpeedyResponseListener; +import org.xtreemfs.test.SetupUtils; + +public class SpeedyPinkyTest extends TestCase { + + private static final int PORT = 12345; + + MultiSpeedy speedy; + + PipelinedPinky pinky; + + int responses; + + public SpeedyPinkyTest(String testName) { + super(testName); + Logging.start(SetupUtils.DEBUG_LEVEL); + TimeSync.initialize(null, 10000000, 50, null); + } + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + + speedy = new MultiSpeedy(); + speedy.start(); + speedy.waitForStartup(); + + PinkyRequestListener listener = new PinkyRequestListener() { + + public void receiveRequest(PinkyRequest theRequest) { + + int length = Integer.parseInt(new String(theRequest.requestBody.array())); + + ReusableBuffer body = generateData(length); + + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_CONTENT_TYPE, HTTPUtils.JSON_TYPE); + + theRequest.setResponse(HTTPUtils.SC_OKAY, body, HTTPUtils.DATA_TYPE.JSON, headers); + pinky.sendResponse(theRequest); + } + + }; + + pinky = new PipelinedPinky(PORT, null, listener); + pinky.start(); + pinky.waitForStartup(); + } + + protected void tearDown() throws Exception { + + speedy.shutdown(); + pinky.shutdown(); + + speedy.waitForShutdown(); + pinky.waitForShutdown(); + } + + /** + * Sends a large amount of requests for responses of varying sizes, without + * waiting before sending the next request. + * + * @throws Exception + */ + public void testAsync() throws Exception { + + final InetSocketAddress endpoint = new InetSocketAddress("localhost", PORT); + + responses = 0; + + speedy.registerListener(new SpeedyResponseListener() { + + public void receiveRequest(SpeedyRequest resp) { + + responses++; + checkResponse(resp); + + synchronized (speedy) { + speedy.notify(); + } + } + + }, endpoint); + + final int numReqs = 2000; + + for (int i = 0; i < numReqs; i++) { + ReusableBuffer body = ReusableBuffer.wrap(Integer.toString(i).getBytes()); + SpeedyRequest sr = new SpeedyRequest("GET", "/", null, null, body, DATA_TYPE.JSON); + speedy.sendRequest(sr, endpoint); + } + + synchronized (speedy) { + while (responses < numReqs) + speedy.wait(); + } + + } + + /** + * Sends a large amount of requests for responses of varying sizes, and + * waits for the response for the next k requests before sending the next k + * requests. 
+ * + * @throws Exception + */ + public void testkSync() throws Exception { + + final InetSocketAddress endpoint = new InetSocketAddress("localhost", PORT); + + responses = 0; + + speedy.registerListener(new SpeedyResponseListener() { + + public void receiveRequest(SpeedyRequest resp) { + + responses++; + checkResponse(resp); + + synchronized (speedy) { + speedy.notify(); + } + } + + }, endpoint); + + final int numReqs = 10000; + final int k = 5; + + for (int i = 0; i < numReqs; i += k) { + + // send the next k requests + for (int j = 0; j < k; j++) { + ReusableBuffer body = ReusableBuffer.wrap(Integer.toString(i + j).getBytes()); + SpeedyRequest sr = new SpeedyRequest("GET", "/", null, null, body, DATA_TYPE.JSON); + speedy.sendRequest(sr, endpoint); + } + + // wait for the next k responses + synchronized (speedy) { + while (responses < i + k) + speedy.wait(); + } + } + } + + private ReusableBuffer generateData(int length) { + + byte[] len = Integer.toString(length).getBytes(); + ReusableBuffer buf = BufferPool.allocate(length + len.length + 1); + buf.position(0); + buf.put(len); + buf.put((byte) 0x20); // space + + for (int i = 0; i < length; i++) + buf.put((byte) 65); + + return buf; + } + + private void checkResponse(SpeedyRequest resp) { + try { + if (resp.status == SpeedyRequest.RequestStatus.FAILED) { + fail("HTTP request failed for unknown reason"); + } else { + int returncode = resp.statusCode; + assertEquals(returncode, 200); + + byte bdy[] = resp.responseBody.array(); + String response = new String(bdy, "ascii"); + int len = Integer.parseInt(response.substring(0, response.indexOf(' '))); + String content = response.substring(response.indexOf(' ') + 1); + assertEquals(content.length(), len); + } + + } catch (Throwable ex) { + fail("Exception occurred in responseListener: " + ex); + } finally { + if (resp != null) + resp.freeBuffer(); + } + } + + public static void main(String[] args) { + TestRunner.run(SpeedyPinkyTest.class); + } +} diff --git 
a/servers/test/org/xtreemfs/test/foundation/SpeedyTest.java b/servers/test/org/xtreemfs/test/foundation/SpeedyTest.java new file mode 100644 index 0000000000000000000000000000000000000000..b1cfa8a768b3ec9391cdd5c4c09a046bad396a69 --- /dev/null +++ b/servers/test/org/xtreemfs/test/foundation/SpeedyTest.java @@ -0,0 +1,151 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Christian Lorenz (ZIB) + */ + +package org.xtreemfs.test.foundation; + +import java.io.IOException; +import java.net.InetSocketAddress; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.foundation.speedy.SpeedyResponseListener; +import org.xtreemfs.test.SetupUtils; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.xtreemfs.common.TimeSync; + +/** + * + * @author bjko + */ +public class SpeedyTest extends TestCase { + + public static final int PORT = 12345; + + HttpServer server; + + MultiSpeedy client; + + String response = null; + + int returncode = 0; + + public SpeedyTest(String testName) { + super(testName); + Logging.start(SetupUtils.DEBUG_LEVEL); + TimeSync.initialize(null, 10000000, 50, null); + } + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + + server = HttpServer.create(new InetSocketAddress("localhost", PORT), 0); + server.createContext("/", new HttpHandler() { + public void handle(HttpExchange httpExchange) throws IOException { + byte[] content = "simpleContents".getBytes("ascii"); + httpExchange.sendResponseHeaders(200, content.length); + httpExchange.getResponseBody().write(content); + httpExchange.getResponseBody().close(); + } + }); + server.start(); + + client = new MultiSpeedy(); + client.start(); + client.waitForStartup(); + } + + protected void tearDown() throws Exception { + server.stop(0); + client.shutdown(); + client.waitForShutdown(); + } + + public void testSpeedy() throws Exception { + + final InetSocketAddress endpoint = new InetSocketAddress("localhost", PORT); + + client.registerListener(new SpeedyResponseListener() { + int numR = 0; + + public void receiveRequest(SpeedyRequest resp) { + try { + if (resp.status == SpeedyRequest.RequestStatus.FAILED) { + fail("HTTP request failed for unknown reason"); + } else { + byte bdy[] = null; + returncode = resp.statusCode; + // System.out.println("sc="+resp.statusCode+" / + // "+resp.responseBody); + if (resp.responseBody == null) { + response = null; + } else { + if (resp.responseBody.hasArray()) { + bdy = resp.responseBody.array(); + } else { + bdy = new byte[resp.responseBody.capacity()]; + resp.responseBody.position(0); + resp.responseBody.get(bdy); + } + + response = new String(bdy, "ascii"); + } + + synchronized (resp) { + resp.notifyAll(); + } + } + + } catch (Exception ex) { + fail("Exception occurred in responseListener: " + ex); + } finally { + if (resp != null) + resp.freeBuffer(); + } + } + }, endpoint); + + SpeedyRequest sr = new SpeedyRequest("GET", "/", null, null); + + client.sendRequest(sr, endpoint); + + synchronized (sr) { + sr.wait(5000); + } + assertEquals(returncode, 200); + assertEquals(response, "simpleContents"); + + } + + public static void main(String[] args) { + TestRunner.run(SpeedyTest.class); + } 
+} diff --git a/servers/test/org/xtreemfs/test/foundation/ssl/SSLPinkyTest.java b/servers/test/org/xtreemfs/test/foundation/ssl/SSLPinkyTest.java new file mode 100644 index 0000000000000000000000000000000000000000..e13184313ed2f2394bd1961d7fd37041d3b59df4 --- /dev/null +++ b/servers/test/org/xtreemfs/test/foundation/ssl/SSLPinkyTest.java @@ -0,0 +1,221 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Christian Lorenz (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.foundation.ssl; + +import java.io.File; +import java.io.InputStreamReader; +import java.net.URL; + +import junit.framework.TestCase; + +import org.apache.commons.httpclient.HttpClient; +import org.apache.commons.httpclient.contrib.ssl.AuthSSLProtocolSocketFactory; +import org.apache.commons.httpclient.methods.GetMethod; +import org.apache.commons.httpclient.methods.PostMethod; +import org.apache.commons.httpclient.methods.StringRequestEntity; +import org.apache.commons.httpclient.protocol.Protocol; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.pinky.PinkyRequestListener; +import org.xtreemfs.foundation.pinky.PipelinedPinky; +import org.xtreemfs.foundation.pinky.SSLOptions; + +/** + * + * @author clorenz + */ +public class SSLPinkyTest extends TestCase { + + Thread test; + + PipelinedPinky sthr; + + public static final int PORT = 12345; + + private static final String URL = "https://localhost:"+PORT+"/"; + + private String PATH = "config/certs/"; + + HttpClient client; + + public SSLPinkyTest(String testName) { + super(testName); + Logging.start(Logging.LEVEL_DEBUG); + + File testfile = new File("testfile"); + if (testfile.getAbsolutePath().endsWith("java/testfile")) { + PATH = "../"+PATH; + } else { + PATH = "./"+PATH; + } + } + + protected void setUp() throws Exception { + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + + client = new HttpClient(); + + SSLOptions sslOptions = new SSLOptions(PATH + "service1.jks", + "passphrase", SSLOptions.JKS_CONTAINER, PATH + "trust.jks", + "passphrase", SSLOptions.JKS_CONTAINER, false); + + // create a new Pinky server + sthr = new PipelinedPinky(PORT, null, null, sslOptions); + + // register a request listener that is called by pinky when + // receiving a request + sthr.registerListener(new PinkyRequestListener() { + public void receiveRequest(PinkyRequest theRequest) { + try { + // unpack body, parse it, write back JSON and send that + // back to the client + if (theRequest.requestBody != null) { + byte bdy[] = null; + if (theRequest.requestBody.hasArray()) { + bdy = theRequest.requestBody.array(); + } else { + bdy = new byte[theRequest.requestBody + .capacity()]; + theRequest.requestBody.position(0); + theRequest.requestBody.get(bdy); + } + + String body = new String(bdy, "utf-8"); + Object o = JSONParser + .parseJSON(new JSONString(body)); + String respBdy = JSONParser.writeJSON(o); + theRequest.setResponse(HTTPUtils.SC_OKAY, + ReusableBuffer.wrap(respBdy.getBytes("utf-8")), + HTTPUtils.DATA_TYPE.JSON); + } else { + theRequest.setResponse(HTTPUtils.SC_OKAY); + } + sthr.sendResponse(theRequest); + } catch (Exception ex) { + ex.printStackTrace(); + try { + theRequest.setResponse(HTTPUtils.SC_SERVER_ERROR); + } catch (Exception e) { + // ignore that + e.printStackTrace(); + } + theRequest.setClose(true); + sthr.sendResponse(theRequest); + } + } + }); + + // init certs + Protocol authhttps = new Protocol("https", + new AuthSSLProtocolSocketFactory( + new URL("file:"+PATH+"/service2.jks"), "passphrase", + new URL("file:"+PATH+"/trust.jks"), "passphrase"), 443); + Protocol.registerProtocol("https", authhttps); + + // start the Pinky server in a new thread +// test = new Thread(sthr); +// test.start(); + sthr.start(); + } + + protected void tearDown() throws Exception { + sthr.shutdown(); + sthr.waitForShutdown(); +// synchronized 
(this) { +// this.wait(2000); +// } + + } + + // TODO add test methods here. The name must begin with 'test'. For example: + // public void testHello() {} + + public void testJSONEcho() throws Exception { + + PostMethod method = new PostMethod(URL); + String content = "[\"Hallo\"]"; + method.setRequestEntity(new StringRequestEntity(content,"text/plain","utf-8")); + int rc = client.executeMethod(method); + assertEquals(rc,200); + String response = method.getResponseBodyAsString(); + assertEquals(response,content); + + } + + public void testEmptyJSON() throws Exception { + + PostMethod method = new PostMethod(URL); + String content = ""; + method.setRequestEntity(new StringRequestEntity(content,"text/plain","utf-8")); + int rc = client.executeMethod(method); + assertEquals(rc,200); + String response = method.getResponseBodyAsString(); + assertEquals(response,content); + + } + + public void testEmptyBody() throws Exception { + + PostMethod method = new PostMethod(URL); + int rc = client.executeMethod(method); + assertEquals(rc,200); + String response = method.getResponseBodyAsString(); + assertEquals(response.length(),0); + + } + + public void testGet() throws Exception { + + GetMethod method = new GetMethod(URL); + int rc = client.executeMethod(method); + assertEquals(rc,200); + String response = method.getResponseBodyAsString(); + assertEquals(response.length(),0); + + } + + public static void main(String[] args){ + SSLPinkyTest pinkyTest = new SSLPinkyTest("SSLPinkyTest"); + try { + pinkyTest.setUp(); + + InputStreamReader in = new InputStreamReader(System.in); + + System.out.println("Push 'Enter' to close the program!"); + in.read(); + in.close(); + + pinkyTest.tearDown(); + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + +} diff --git a/servers/test/org/xtreemfs/test/foundation/ssl/SSLSpeedyPinkyTest.java b/servers/test/org/xtreemfs/test/foundation/ssl/SSLSpeedyPinkyTest.java new file mode 100644 index 
0000000000000000000000000000000000000000..798555f9755c1700b387743cafcee781e163fb4e --- /dev/null +++ b/servers/test/org/xtreemfs/test/foundation/ssl/SSLSpeedyPinkyTest.java @@ -0,0 +1,273 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Christian Lorenz (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.foundation.ssl; + +import java.io.File; +import java.net.InetSocketAddress; +import junit.framework.*; + +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.pinky.PinkyRequestListener; +import org.xtreemfs.foundation.pinky.PipelinedPinky; +import org.xtreemfs.foundation.pinky.SSLOptions; + + +/** + * + * @author clorenz + */ +public class SSLSpeedyPinkyTest extends TestCase { + + public static final int PORT = 12345; + + private static final String URL = "https://localhost:"+PORT+"/"; + + private String PATH = "config/certs/"; + + PipelinedPinky pinky; + + RPCClient speedy; + + public SSLSpeedyPinkyTest(String testName) { + super(testName); + Logging.start(Logging.LEVEL_DEBUG); + + File testfile = new File("testfile"); + if (testfile.getAbsolutePath().endsWith("java/testfile")) { + PATH = "../"+PATH; + } else { + PATH = "./"+PATH; + } + } + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ + getName()); + + SSLOptions pinkySslOptions = new SSLOptions(PATH + "service1.jks", + "passphrase", SSLOptions.JKS_CONTAINER, PATH + "trust.jks", + "passphrase", SSLOptions.JKS_CONTAINER, false); + + pinky = new PipelinedPinky(PORT, null, null, pinkySslOptions); +// pinky = new PipelinedPinky(PORT, null); + + + // register a request listener that is called by pinky when + // receiving a request + pinky.registerListener(new PinkyRequestListener() { + public void receiveRequest(PinkyRequest theRequest) { + try { + // unpack body, parse it, write back JSON and send that + // back to the client + if (theRequest.requestBody != null) { + byte bdy[] = null; + if (theRequest.requestBody.hasArray()) { + bdy = theRequest.requestBody.array(); + } else { + bdy = new byte[theRequest.requestBody + .capacity()]; + theRequest.requestBody.position(0); + theRequest.requestBody.get(bdy); + } + + String body = new String(bdy, "utf-8"); + Object o = JSONParser + .parseJSON(new JSONString(body)); + String respBdy = JSONParser.writeJSON(o); + theRequest.setResponse(HTTPUtils.SC_OKAY, + ReusableBuffer.wrap(respBdy.getBytes("utf-8")), + HTTPUtils.DATA_TYPE.JSON); + } else { + theRequest.setResponse(HTTPUtils.SC_OKAY); + } + pinky.sendResponse(theRequest); + } catch (Exception ex) { + ex.printStackTrace(); + try { + theRequest.setResponse(HTTPUtils.SC_SERVER_ERROR); + } catch (Exception e) { + // ignore that + e.printStackTrace(); + } + theRequest.setClose(true); + pinky.sendResponse(theRequest); + } + } + }); + pinky.start(); + + SSLOptions speedySslOptions = new SSLOptions(PATH + "client1.p12", + "passphrase", SSLOptions.PKCS12_CONTAINER, PATH + "trust.jks", + "passphrase", SSLOptions.JKS_CONTAINER, false); + speedy = new RPCClient(500000000, speedySslOptions); +// speedy = new RPCClient(null, 5000); + + } + + protected void tearDown() throws Exception { + pinky.shutdown(); + speedy.shutdown(); + pinky.waitForShutdown(); + speedy.waitForShutdown(); + } + + // TODO add test methods 
here. The name must begin with 'test'. For example: + // public void testHello() {} + + public void testSimple() throws Exception { + + RPCResponse rp = null; + + InetSocketAddress local = new InetSocketAddress("localhost", PORT); + rp = speedy.sendRPC(local, "/bla", null, "bla", null); + assertEquals(rp.getStatusCode(), 200); + rp.freeBuffers(); + + RPCResponse rp2 = null; + + rp2 = speedy.sendRPC(local, "/bla", null, "bla", null); + assertEquals(rp2.getStatusCode(), 200); + rp2.freeBuffers(); + + RPCResponse rp3 = null; + + rp3 = speedy.sendRPC(local, "/bla", null, "bla", null); + assertEquals(rp3.getStatusCode(), 200); + rp3.freeBuffers(); + + } + +/* public void testErrorCases() throws Exception { + + RPCResponse rp = null; + try { + InetSocketAddress nonexiting = new InetSocketAddress( + "yabba-brabbel.zib.de", 80); + rp = speedy.sendRPC(nonexiting, "bla", null, "bla", null); + rp.waitForResponse(); + fail("IOException should have been thrown."); + } catch (UnresolvedAddressException ex) { + } finally { + if (rp != null) + rp.freeBuffers(); + } + + InetSocketAddress local = new InetSocketAddress("localhost", PORT); + rp = speedy.sendRPC(local, "/bla", null, "bla", null); + assertEquals(rp.getStatusCode(), 200); + rp.freeBuffers(); + + InetSocketAddress local500 = null; + try { + local500 = new InetSocketAddress("localhost",PORT); + rp = speedy.sendRPC(local500,"/bla",null,"bla",null); + rp.waitForResponse(); + fail("HttpErrorException should have been thrown."); + } catch (HttpErrorException ex) { + assertEquals(ex.getStatusCode(), 500); + } finally { + if (rp != null) + rp.freeBuffers(); + } + + InetSocketAddress localWait = null; + try { + localWait = new InetSocketAddress("localhost",PORT); + rp = speedy.sendRPC(localWait,"/bla",null,"bla",null); + rp.waitForResponse(); + fail("IOException should have been thrown."); + } catch (IOException ex) { + } finally { + if (rp != null) + rp.freeBuffers(); + } + + rp = speedy.sendRPC(local,"/bla",null,"bla",null); + 
final AtomicBoolean hasResponse = new AtomicBoolean(false); + final Object me = this; + rp.setResponseListener(new RPCResponseListener() { + + @Override + public void responseAvailable(RPCResponse response) { + hasResponse.set(true); + synchronized (me) { + me.notify(); + } + } + }); + synchronized (this) { + try { + this.wait(1000); + + } catch (InterruptedException interruptedException) { + } + + } + assertTrue(hasResponse.get()); + + rp = speedy.sendRPC(localWait,"/bla",null,"bla",null); + final AtomicBoolean hasNoResponse = new AtomicBoolean(true); + rp.setResponseListener(new RPCResponseListener() { + + @Override + public void responseAvailable(RPCResponse response) { + hasNoResponse.set(false); + synchronized (me) { + me.notify(); + } + } + }); + synchronized (this) { + try { + this.wait(500); + + } catch (InterruptedException interruptedException) { + } + + } + rp.freeBuffers(); + assertTrue(hasNoResponse.get()); + System.out.println("wait for response!"); + synchronized (this) { + try { + this.wait(10000); + + } catch (InterruptedException interruptedException) { + interruptedException.printStackTrace(); + } + + } + System.out.println("waiting done"); + + }*/ +} diff --git a/servers/test/org/xtreemfs/test/foundation/ssl/SSLSpeedyTest.java b/servers/test/org/xtreemfs/test/foundation/ssl/SSLSpeedyTest.java new file mode 100644 index 0000000000000000000000000000000000000000..41d1f9381412a9f524e5d40a2b77becb2b0cdc5e --- /dev/null +++ b/servers/test/org/xtreemfs/test/foundation/ssl/SSLSpeedyTest.java @@ -0,0 +1,183 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Christian Lorenz (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.foundation.ssl; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpsConfigurator; +import com.sun.net.httpserver.HttpsParameters; +import com.sun.net.httpserver.HttpsServer; + +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; + +import javax.net.ssl.SSLParameters; + +import junit.framework.*; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.pinky.SSLOptions; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.foundation.speedy.SpeedyResponseListener; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.test.SetupUtils; + +/** + * + * @author clorenz + */ +public class SSLSpeedyTest extends TestCase { + + public static final int PORT = 12345; + + HttpsServer server; + + MultiSpeedy client; + + String response = null; + + int returncode = 0; + + private static final String URL = "https://localhost:"+PORT+"/"; + + private String PATH = "config/certs/"; + + public SSLSpeedyTest(String testName) { + super(testName); + Logging.start(SetupUtils.DEBUG_LEVEL); + + File testfile = new File("testfile"); + if (testfile.getAbsolutePath().endsWith("java/testfile")) { + PATH = "../"+PATH; + } else { + PATH = "./"+PATH; + 
} + } + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." + getName()); + + server = HttpsServer.create(new InetSocketAddress("localhost",PORT),0); + SSLOptions sslOptions = new SSLOptions(PATH + "service1.jks", + "passphrase", SSLOptions.JKS_CONTAINER, PATH + "trust.jks", + "passphrase", SSLOptions.JKS_CONTAINER, false); + server.setHttpsConfigurator (new HttpsConfigurator(sslOptions.getSSLContext()) { + public void configure (HttpsParameters params) { + // get the default parameters + SSLParameters sslParams = getSSLContext().getDefaultSSLParameters(); + + // set ssl params for speedy + sslParams.setProtocols(getSSLContext().getSupportedSSLParameters().getProtocols()); + sslParams.setCipherSuites(getSSLContext().getSupportedSSLParameters().getCipherSuites()); + sslParams.setNeedClientAuth(true); + + params.setSSLParameters(sslParams); + } + }); +// server.setHttpsConfigurator(new HttpsConfigurator(sslOptions.getSSLContext())); + server.createContext("/",new HttpHandler() { + public void handle(HttpExchange httpExchange) throws IOException { + byte[] content = "simpleContents".getBytes("ascii"); + httpExchange.sendResponseHeaders(200,content.length); + httpExchange.getResponseBody().write(content); + httpExchange.getResponseBody().close(); + } + }); + server.start(); + + SSLOptions sslOptions2 = new SSLOptions(PATH + "service2.jks", + "passphrase", SSLOptions.JKS_CONTAINER, PATH + "trust.jks", + "passphrase", SSLOptions.JKS_CONTAINER, false); + + client = new MultiSpeedy(sslOptions2); + } + + protected void tearDown() throws Exception { + client.shutdown(); + client.waitForShutdown(); + server.stop(0); + } + + public void testSpeedy() throws Exception { + + final InetSocketAddress endpoint = new InetSocketAddress( + "localhost", PORT); + + client.registerListener(new SpeedyResponseListener() { + int numR = 0; + + public void receiveRequest(SpeedyRequest resp) { + try { + if (resp.status == 
SpeedyRequest.RequestStatus.FAILED) { + fail("HTTP request failed for unknown reason"); + } else { + byte bdy[] = null; + returncode = resp.statusCode; +// System.out.println("sc="+resp.statusCode+" / "+resp.responseBody); + if (resp.responseBody == null) { + response = null; + } else { + if (resp.responseBody.hasArray()) { + bdy = resp.responseBody.array(); + } else { + bdy = new byte[resp.responseBody.capacity()]; + resp.responseBody.position(0); + resp.responseBody.get(bdy); + } + + response = new String(bdy, "ascii"); + } + + synchronized (resp) { + resp.notifyAll(); + } + } + } catch (Exception ex) { + fail("Exception occurred in responseListener: "+ex); + } finally { + if (resp != null) + resp.freeBuffer(); + } + } + }, endpoint); + + Thread test = new Thread(client); + test.start(); + Thread.currentThread().yield(); + + SpeedyRequest sr = new SpeedyRequest("GET","/",null,null); + + client.sendRequest(sr,endpoint); + + synchronized (sr) { + sr.wait(5000); + } + assertEquals(returncode,200); + assertEquals(response,"simpleContents"); + } + +} diff --git a/servers/test/org/xtreemfs/test/io/ByteMapperTest.java b/servers/test/org/xtreemfs/test/io/ByteMapperTest.java new file mode 100644 index 0000000000000000000000000000000000000000..a160a6ef582c71955f866a8b41de45fd5bea8f73 --- /dev/null +++ b/servers/test/org/xtreemfs/test/io/ByteMapperTest.java @@ -0,0 +1,100 @@ +package org.xtreemfs.test.io; + +import java.io.IOException; + +import junit.framework.TestCase; + +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.HttpErrorException; +import org.xtreemfs.common.clients.io.ByteMapper; +import org.xtreemfs.common.clients.io.ByteMapperFactory; +import org.xtreemfs.common.clients.io.ObjectStore; +import org.xtreemfs.foundation.json.JSONException; + +public class ByteMapperTest extends TestCase{ + + public void setUp() { + + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ + getName()); + } + + public void tearDown(){ + } + + public void testRead() throws Exception{ + + ByteMapper byteMapperRAID0 = ByteMapperFactory.createByteMapper("RADI0", 2, new TestObjectStore()); + int offset = 0; + int bytesToRead = 6; + byte[] resultBuffer = new byte[bytesToRead]; + assertEquals(byteMapperRAID0.read(resultBuffer, offset, bytesToRead,0), 6); + + bytesToRead = 2; + resultBuffer = new byte[bytesToRead]; + assertEquals(byteMapperRAID0.read(resultBuffer, offset, bytesToRead,0), 2); + + bytesToRead = 1; + resultBuffer = new byte[bytesToRead]; + assertEquals(byteMapperRAID0.read(resultBuffer, offset, bytesToRead,0), 1); + + offset = 2; + bytesToRead = 6; + resultBuffer = new byte[bytesToRead+2]; + assertEquals(byteMapperRAID0.read(resultBuffer, offset, bytesToRead,0), 6); + + resultBuffer = new byte[bytesToRead -1]; + try{ + byteMapperRAID0.read(resultBuffer, offset, bytesToRead,0); + fail("the resultBuffer is to small"); + }catch(Exception e){} + + byteMapperRAID0 = ByteMapperFactory.createByteMapper("RAID0", 2, new EmptyObjectStore()); + bytesToRead = 1; + offset = 0; + resultBuffer = new byte[bytesToRead]; + assertEquals(byteMapperRAID0.read(resultBuffer, offset, bytesToRead,0), 0); + + } + + public void testWrite() throws Exception{ + ByteMapper byteMapperRAID0 = ByteMapperFactory.createByteMapper("RADI0", 2, new TestObjectStore()); + byte[] writeFromBuffer = "Hello World".getBytes(); + int offset = 0; + int bytesToWrite = 6; + assertEquals(byteMapperRAID0.write(writeFromBuffer, offset, bytesToWrite,0), 6); + bytesToWrite = 11; + assertEquals(byteMapperRAID0.write(writeFromBuffer, offset, bytesToWrite,0),11); + bytesToWrite = 12; + try{ + byteMapperRAID0.write(writeFromBuffer, offset, bytesToWrite,0); + fail("bytesToWrite > length of writeFromBuffer"); + }catch(Exception e){} + } + + class TestObjectStore implements ObjectStore{ + public ReusableBuffer readObject(long objectNo, long offset, long length){ + String content = "Hallo World"; + 
return ReusableBuffer.wrap(content.substring((int) offset, (int) (offset+length)).getBytes()); + } + + public void writeObject(long objectNo, long offset, ReusableBuffer buffer) throws IOException, + JSONException, InterruptedException, HttpErrorException { + + } + } + + class EmptyObjectStore implements ObjectStore{ + public ReusableBuffer readObject(long objectNo, long offset, long length){ + return ReusableBuffer.wrap("".getBytes()); + } + + public void writeObject(long objectNo, long offset, ReusableBuffer buffer) throws IOException, + JSONException, InterruptedException, HttpErrorException { + + } + } + + + +} diff --git a/servers/test/org/xtreemfs/test/io/RandomAccessFileTest.java b/servers/test/org/xtreemfs/test/io/RandomAccessFileTest.java new file mode 100644 index 0000000000000000000000000000000000000000..1ab30986d485eaa5e3a6bd65a4406e0acab73b09 --- /dev/null +++ b/servers/test/org/xtreemfs/test/io/RandomAccessFileTest.java @@ -0,0 +1,197 @@ +package org.xtreemfs.test.io; + + +import java.io.File; +import java.net.InetSocketAddress; +import java.util.Map; + +import junit.framework.TestCase; + +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.io.RandomAccessFile; +import org.xtreemfs.common.clients.mrc.MRCClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.dir.DIRConfig; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.mrc.MRCConfig; +import org.xtreemfs.mrc.RequestController; +import org.xtreemfs.osd.OSD; +import org.xtreemfs.osd.OSDConfig; +import org.xtreemfs.test.SetupUtils; + +public class RandomAccessFileTest extends TestCase { + RandomAccessFile randomAccessFile; + + private RequestController mrc1; + + private org.xtreemfs.dir.RequestController dirService; + + private MRCConfig mrcCfg1; + + private OSDConfig osdConfig1, 
osdConfig2; + + private DIRConfig dsCfg; + + private OSD osd1, osd2; + + private InetSocketAddress mrc1Address; + + private MRCClient client; + + private MultiSpeedy speedy; + + private String authString; + + private String volumeName; + + public RandomAccessFileTest() { + Logging.start(Logging.LEVEL_TRACE); + } + + + public void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." + + getName()); + + dsCfg = SetupUtils.createDIRConfig(); + + mrcCfg1 = SetupUtils.createMRC1Config(); + mrc1Address = SetupUtils.getMRC1Addr(); + + osdConfig1 = SetupUtils.createOSD1Config(); + osdConfig2 = SetupUtils.createOSD2Config(); + // cleanup + File testDir = new File(SetupUtils.TEST_DIR); + + FSUtils.delTree(testDir); + testDir.mkdirs(); + + // start the Directory Service + dirService = new org.xtreemfs.dir.RequestController(dsCfg); + dirService.startup(); + + // start the OSDs + osd1 = new OSD(osdConfig1); + osd2 = new OSD(osdConfig2); + + // start MRC + mrc1 = new RequestController(mrcCfg1); + mrc1.startup(); + + client = SetupUtils.createMRCClient(10000); + + speedy = new MultiSpeedy(); + speedy.start(); + + String authString = NullAuthProvider.createAuthString("userXY", + MRCClient.generateStringList("groupZ")); + + volumeName = "testVolume"; + + // create a volume (no access control) + client.createVolume(mrc1Address, volumeName, authString); + + // create some files and directories + client.createDir(mrc1Address, volumeName + "/myDir", authString); + + for (int i = 0; i < 10; i++) + client.createFile(mrc1Address, volumeName + "/myDir/test" + i + + ".txt", authString); + + } + + + public void tearDown() throws Exception { + mrc1.shutdown(); + client.shutdown(); + osd1.shutdown(); + osd2.shutdown(); + dirService.shutdown(); + speedy.shutdown(); + + client.waitForShutdown(); + speedy.waitForShutdown(); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, BufferPool.getStatus()); + } + + + public void testReadAndWrite() throws Exception{ 
+ randomAccessFile = new RandomAccessFile("w", mrc1Address, volumeName + + "/myDir/test1.txt", speedy); + + byte[] bytesIn = new byte[(int)(3*randomAccessFile.getStripeSize()+2)]; + for (int i = 0; i < 3 * randomAccessFile.getStripeSize() + 2; i++) { + bytesIn[i] = (byte)(i % 25 + 65); + } + int length = bytesIn.length; + int result = randomAccessFile.write(bytesIn, 0, length); + assertEquals(length, result); + + byte[] bytesOut = new byte[length]; + result = randomAccessFile.read(bytesOut, 0, length); + + assertEquals(0,result); + + bytesOut = new byte[length]; + randomAccessFile.seek(0); + result = randomAccessFile.read(bytesOut, 0, length); + + assertEquals(length, result); + assertEquals(new String(bytesIn), new String(bytesOut)); + + + bytesOut = new byte[4]; + + + bytesIn = "Hello World".getBytes(); + randomAccessFile.seek(0); + randomAccessFile.write(bytesIn, 0, bytesIn.length); + + randomAccessFile.seek(0); + result = randomAccessFile.read(bytesOut, 0,4); + assertEquals(new String(bytesOut), new String("Hell")); + + randomAccessFile.seek(1); + bytesOut = new byte[4]; + result = randomAccessFile.read(bytesOut, 0,4); + assertEquals(new String(bytesOut), new String("ello")); + + } + + + public void testReadAndWriteObject() throws Exception { + randomAccessFile = new RandomAccessFile("w", mrc1Address, volumeName + + "/myDir/test1.txt", speedy); + + byte[] bytesIn = new String("Hallo").getBytes(); + int length = bytesIn.length; + ReusableBuffer data = ReusableBuffer.wrap(bytesIn); + randomAccessFile.writeObject(0, 0, data); + + ReusableBuffer result = randomAccessFile.readObject(0, 0, length); + assertEquals(new String(bytesIn), new String(result.array())); + int bytesRead = randomAccessFile.readObject(0); + assertEquals(5, bytesRead); + + String content = ""; + for (int i = 0; i < 6000; i++) + content = content.concat("Hello World "); + bytesIn = content.getBytes(); + assertEquals(bytesIn.length, 72000); + + length = bytesIn.length; + + 
randomAccessFile.write(bytesIn, 0, length); + + int res = randomAccessFile.readObject(0); + assertEquals(65536,res); + res = randomAccessFile.readObject(1); + assertEquals(6464,res); + } + +} diff --git a/servers/test/org/xtreemfs/test/mrc/BufferBackedMetadataTest.java b/servers/test/org/xtreemfs/test/mrc/BufferBackedMetadataTest.java new file mode 100644 index 0000000000000000000000000000000000000000..417b15d980ee0a13c95d3c3201a47c6ddfb796b1 --- /dev/null +++ b/servers/test/org/xtreemfs/test/mrc/BufferBackedMetadataTest.java @@ -0,0 +1,798 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.mrc; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.mrc.brain.metadata.ACL; +import org.xtreemfs.mrc.brain.metadata.BufferBackedACL; +import org.xtreemfs.mrc.brain.metadata.BufferBackedDirObject; +import org.xtreemfs.mrc.brain.metadata.BufferBackedFileObject; +import org.xtreemfs.mrc.brain.metadata.BufferBackedStripingPolicy; +import org.xtreemfs.mrc.brain.metadata.BufferBackedXAttrs; +import org.xtreemfs.mrc.brain.metadata.BufferBackedXLoc; +import org.xtreemfs.mrc.brain.metadata.BufferBackedXLocList; +import org.xtreemfs.mrc.brain.metadata.FSObject; +import org.xtreemfs.mrc.brain.metadata.FileObject; +import org.xtreemfs.mrc.brain.metadata.StripingPolicy; +import org.xtreemfs.mrc.brain.metadata.XAttrs; +import org.xtreemfs.mrc.brain.metadata.XLoc; +import org.xtreemfs.mrc.brain.metadata.XLocList; +import org.xtreemfs.test.SetupUtils; + +public class BufferBackedMetadataTest extends TestCase { + + public BufferBackedMetadataTest() { + Logging.start(SetupUtils.DEBUG_LEVEL); + } + + protected void setUp() throws Exception { + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + } + + protected void tearDown() throws Exception { + } + + public void testBufferBackedACL() throws Exception { + + { + final List entities = generateStrList("me", "someone", "him", "us"); + final List rights = generateIntList(32, 0, 222, 4873872); + + // create ACL + BufferBackedACL acl1 = new BufferBackedACL(toArray(entities), toArray(rights)); + checkACL(entities, rights, acl1); + + // copy ACL + BufferBackedACL acl2 = new BufferBackedACL(acl1.getBuffer(), true, true); + checkACL(entities, rights, acl2); + + acl2.deleteEntry("someone"); + entities.remove(1); + rights.remove(1); + checkACL(entities, rights, acl2); + + acl2.editEntry("them", 433); + entities.add("them"); + rights.add(433); + checkACL(entities, rights, acl2); + + acl2.editEntry("me", 111); + rights.remove(0); + rights.add(0, 111); + checkACL(entities, rights, acl2); + + // test iterator + Iterator it = acl2.iterator(); + while (it.hasNext()) + it.next(); + + acl1.destroy(); + acl2.destroy(); + } + + { + final List entities = generateStrList("this"); + final List rights = generateIntList(Integer.MAX_VALUE); + + // create ACL + BufferBackedACL acl1 = new BufferBackedACL(toArray(entities), toArray(rights)); + checkACL(entities, rights, acl1); + + // copy ACL + BufferBackedACL acl2 = new BufferBackedACL(acl1.getBuffer(), true, true); + checkACL(entities, rights, acl2); + + acl2.deleteEntry("this"); + entities.remove(0); + rights.remove(0); + checkACL(entities, rights, acl2); + + acl1.destroy(); + acl2.destroy(); + } + + { + final List entities = generateStrList(); + final List rights = generateIntList(); + + // create ACL + BufferBackedACL acl1 = new BufferBackedACL(toArray(entities), toArray(rights)); + checkACL(entities, rights, acl1); + + // copy ACL + BufferBackedACL acl2 = new BufferBackedACL(acl1.getBuffer(), true, true); + checkACL(entities, rights, acl2); + + acl2.editEntry("blubberbla", Integer.MAX_VALUE); + acl2.editEntry("blubberbla", Integer.MAX_VALUE); + 
acl2.editEntry("blubberbla", Integer.MAX_VALUE); + entities.add("blubberbla"); + rights.add(Integer.MAX_VALUE); + checkACL(entities, rights, acl2); + + acl1.destroy(); + acl2.destroy(); + } + } + + public void testBufferBackedStripingPolicy() throws Exception { + + { + String pattern = "RAID0"; + int stripeSize = 256; + int width = 5; + + // create striping policy + BufferBackedStripingPolicy sp1 = new BufferBackedStripingPolicy(pattern, stripeSize, + width); + checkSP(pattern, stripeSize, width, sp1); + + // copy striping policy + BufferBackedStripingPolicy sp2 = new BufferBackedStripingPolicy(sp1.getBuffer(), true, + true); + checkSP(pattern, stripeSize, width, sp2); + + pattern = "AAAAAAAA"; + stripeSize = 432; + width = 43333; + sp2.setPattern(pattern); + sp2.setStripeSize(stripeSize); + sp2.setWidth(width); + checkSP(pattern, stripeSize, width, sp2); + + sp1.destroy(); + sp2.destroy(); + } + + { + final String pattern = "RAID0"; + final int stripeSize = 16; + final int width = 1; + + // create striping policy + BufferBackedStripingPolicy sp1 = new BufferBackedStripingPolicy(pattern, stripeSize, + width); + checkSP(pattern, stripeSize, width, sp1); + + // copy striping policy + BufferBackedStripingPolicy sp2 = new BufferBackedStripingPolicy(sp1.getBuffer(), true, + true); + checkSP(pattern, stripeSize, width, sp2); + + sp1.destroy(); + sp2.destroy(); + } + } + + public void testBufferBackedXAttrs() throws Exception { + + { + final List keys = generateStrList("someAttr", "anotherAttr", "attr"); + final List values = generateStrList("someValue", "anotherValue", "attrValue"); + final List uids = generateStrList("myUID", "me", ""); + + // create XAttrs + BufferBackedXAttrs xattrs1 = new BufferBackedXAttrs(toArray(keys), toArray(values), + toArray(uids)); + checkXAttrs(keys, values, uids, xattrs1); + + // copy XAttrs + BufferBackedXAttrs xattrs2 = new BufferBackedXAttrs(xattrs1.getBuffer(), true, true); + checkXAttrs(keys, values, uids, xattrs2); + + 
xattrs1.destroy(); + xattrs2.destroy(); + } + { + final List keys = generateStrList("k1", "k2", "k3", "k4", "k5", "k6", "k7", + "k8"); + final List values = generateStrList("v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v8"); + final List uids = generateStrList("me1", "me2", "me3", "me4", "me5", "me6", + "me7", "me8"); + + // create XAttrs + BufferBackedXAttrs xattrs1 = new BufferBackedXAttrs(toArray(keys), toArray(values), + toArray(uids)); + checkXAttrs(keys, values, uids, xattrs1); + + // clone XAttrs + BufferBackedXAttrs xattrs2 = new BufferBackedXAttrs(xattrs1.getBuffer(), true, true); + checkXAttrs(keys, values, uids, xattrs2); + + // delete entry + xattrs2.deleteEntry("k4", "me4"); + keys.remove(3); + values.remove(3); + uids.remove(3); + checkXAttrs(keys, values, uids, xattrs2); + + // add new entry + xattrs2.editEntry("new", "val", "someone"); + keys.add("new"); + values.add("val"); + uids.add("someone"); + checkXAttrs(keys, values, uids, xattrs2); + + // add trailing entry w/ existing key and different uid + xattrs2.editEntry("k7", "bla", "me4"); + keys.add("k7"); + values.add("bla"); + uids.add("me4"); + checkXAttrs(keys, values, uids, xattrs2); + + // replace trailing entry + xattrs2.editEntry("k7", "blub", "me4"); + values.remove("bla"); + values.add("blub"); + checkXAttrs(keys, values, uids, xattrs2); + + // replace inner entry + xattrs2.editEntry("k5", "8282828", "me5"); + values.remove(3); + values.add(3, "8282828"); + checkXAttrs(keys, values, uids, xattrs2); + + // test iterator + Iterator it = xattrs2.iterator(); + while (it.hasNext()) + it.next(); + + xattrs1.destroy(); + xattrs2.destroy(); + } + + } + + public void testBufferBackedXLoc() throws Exception { + + { + final String[] osds = { "someOSD", "anotherOSD", "myOSD" }; + final BufferBackedStripingPolicy sp = new BufferBackedStripingPolicy("RAID0", 1024, 4); + + // create XLoc + BufferBackedXLoc xloc1 = new BufferBackedXLoc(sp, osds); + checkXLoc(osds, sp, xloc1); + + // copy XLoc + 
BufferBackedXLoc xloc2 = new BufferBackedXLoc(xloc1.getBuffer(), true, true); + checkXLoc(osds, sp, xloc2); + + xloc1.destroy(); + xloc2.destroy(); + sp.destroy(); + } + + } + + public void testBufferBackedXLocList() throws Exception { + + { + final List sp = generateSPList( + new BufferBackedStripingPolicy("RAID0", 5, 1), new BufferBackedStripingPolicy( + "RAID5", 99, 33), new BufferBackedStripingPolicy("asfd", 34, -1)); + + final List replicas = generateXLocList(new BufferBackedXLoc( + sp.get(0), new String[] { "dasfk", "asfd", "afastfads4" }), new BufferBackedXLoc(sp + .get(1), new String[] { "fdsay", "34", "4" }), new BufferBackedXLoc(sp.get(2), + new String[] { "354", ",mn", "asdf" })); + int version = 37; + + final BufferBackedXLoc newRepl = new BufferBackedXLoc(sp.get(1), new String[] { + "324432", "kkakslfdllslfldslfd", "4554" }); + + // create XLocList + BufferBackedXLocList xlocList1 = new BufferBackedXLocList(toArray(replicas), version); + checkXLocList(replicas, version, xlocList1); + + // copy XLocList + BufferBackedXLocList xlocList2 = new BufferBackedXLocList(xlocList1.getBuffer(), true, + true); + checkXLocList(replicas, version, xlocList2); + + // add a replica + xlocList2.addReplica(newRepl, true); + replicas.add(newRepl); + checkXLocList(replicas, ++version, xlocList2); + + // delete a replica + xlocList2.removeReplica(2, false); + replicas.remove(2).destroy(); + checkXLocList(replicas, version, xlocList2); + + // delete last replica + int last = xlocList2.getReplicaCount() - 1; + xlocList2.removeReplica(last, false); + replicas.remove(last).destroy(); + checkXLocList(replicas, version, xlocList2); + + // test iterator + Iterator it = xlocList2.iterator(); + while (it.hasNext()) + it.next(); + + xlocList1.destroy(); + xlocList2.destroy(); + + for (BufferBackedStripingPolicy spol : sp) + spol.destroy(); + + for (BufferBackedXLoc xloc : replicas) + xloc.destroy(); + } + + } + + public void testBufferBackedDirObject() throws Exception { + + { + 
final long id = 99999; + final int atime = 999; + final int ctime = 888; + final int mtime = 777; + final String owner = "someone"; + final String group = "somegroup"; + final String linkTarget = "linkTarget"; + final BufferBackedACL acl = null; + final BufferBackedStripingPolicy defaultSP = null; + final BufferBackedXAttrs xattrs = null; + + // create dir object + BufferBackedDirObject dirObj1 = new BufferBackedDirObject(id, atime, ctime, mtime, + owner, group, acl, defaultSP, linkTarget, xattrs); + checkDirObject(id, atime, ctime, mtime, owner, group, linkTarget, acl, defaultSP, + xattrs, dirObj1); + + // copy dir object + BufferBackedDirObject dirObj2 = new BufferBackedDirObject(dirObj1.getBuffer(), true, + true); + checkDirObject(id, atime, ctime, mtime, owner, group, linkTarget, acl, defaultSP, + xattrs, dirObj2); + + dirObj1.destroy(); + dirObj2.destroy(); + } + + { + long id = 34223; + int atime = 12; + int ctime = 11; + int mtime = 0; + String owner = "me"; + String group = "mygroup"; + String linkTarget = null; + final BufferBackedACL acl = new BufferBackedACL(new String[] { "me" }, new int[] { 23 }); + final BufferBackedACL acl2 = new BufferBackedACL(new String[] { "someone", + "someoneelse" }, new int[] { 77, 32 }); + final BufferBackedStripingPolicy defaultSP = new BufferBackedStripingPolicy("RAID0", + 32, 99999); + final BufferBackedStripingPolicy defaultSP2 = new BufferBackedStripingPolicy("344334", + 298, 12); + final BufferBackedXAttrs xattrs = new BufferBackedXAttrs(new String[] { "attr1", + "attr2" }, new String[] { "value1", "" }, new String[] { "me", "" }); + final BufferBackedXAttrs xattrs2 = new BufferBackedXAttrs(new String[] { "gds", }, + new String[] { "rwe432" }, new String[] { "us" }); + + // create dir object + BufferBackedDirObject dirObj1 = new BufferBackedDirObject(id, atime, ctime, mtime, + owner, group, acl, defaultSP, linkTarget, xattrs); + checkDirObject(id, atime, ctime, mtime, owner, group, linkTarget, acl, defaultSP, + xattrs, 
dirObj1); + + // copy dir object + BufferBackedDirObject dirObj2 = new BufferBackedDirObject(dirObj1.getBuffer(), true, + true); + checkDirObject(id, atime, ctime, mtime, owner, group, linkTarget, acl, defaultSP, + xattrs, dirObj2); + + BufferBackedDirObject dirObj3 = new BufferBackedDirObject(dirObj2.getId(), dirObj2 + .getAtime(), dirObj2.getCtime(), dirObj2.getMtime(), dirObj2.getOwnerId() + .toString(), dirObj2.getOwningGroupId().toString(), dirObj2.getAcl(), dirObj2 + .getStripingPolicy(), null, dirObj2.getXAttrs()); + checkDirObject(id, atime, ctime, mtime, owner, group, null, acl, defaultSP, xattrs, + dirObj3); + + id = 77; + atime = 111; + ctime = 111; + mtime = 111; + owner = "blub"; + group = "blubberbla"; + linkTarget = "somewhere"; + + dirObj3.setId(id); + dirObj3.setAtime(atime); + dirObj3.setCtime(ctime); + dirObj3.setMtime(mtime); + dirObj3.setOwnerId(owner); + dirObj3.setOwningGroupId(group); + dirObj3.setLinkTarget(linkTarget); + dirObj3.setACL(acl2); + dirObj3.setStripingPolicy(defaultSP2); + dirObj3.setXAttrs(xattrs2); + checkDirObject(id, atime, ctime, mtime, owner, group, linkTarget, acl2, defaultSP2, + xattrs2, dirObj3); + + dirObj1.destroy(); + dirObj2.destroy(); + dirObj3.destroy(); + + xattrs.destroy(); + xattrs2.destroy(); + defaultSP.destroy(); + defaultSP2.destroy(); + acl.destroy(); + acl2.destroy(); + } + } + + public void testBufferBackedFileObject() throws Exception { + + { + final long id = 122111; + final int atime = 43; + final int ctime = Integer.MAX_VALUE; + final int mtime = 0; + final long size = 3298438; + final short linkcount = 3; + final int epoch = 4; + final int issuedEpoch = 5; + final boolean readonly = false; + final String owner = "vyxcvcxy"; + final String group = "afdsafdsafds"; + final String linkTarget = "linkTarget"; + final BufferBackedACL acl = null; + final BufferBackedXLocList xlocList = null; + final BufferBackedStripingPolicy sp = null; + final BufferBackedXAttrs xattrs = null; + + // create file object + 
BufferBackedFileObject fileObj1 = new BufferBackedFileObject(id, atime, ctime, mtime, + size, linkcount, epoch, issuedEpoch, readonly, owner, group, acl, xlocList, sp, + linkTarget, xattrs); + checkFileObject(id, atime, ctime, mtime, size, linkcount, epoch, issuedEpoch, readonly, + owner, group, linkTarget, acl, xlocList, sp, xattrs, fileObj1); + + // copy file object + BufferBackedFileObject fileObj2 = new BufferBackedFileObject(fileObj1.getBuffer(), + true, true); + checkFileObject(id, atime, ctime, mtime, size, linkcount, epoch, issuedEpoch, readonly, + owner, group, linkTarget, acl, xlocList, sp, xattrs, fileObj2); + + fileObj1.destroy(); + fileObj2.destroy(); + } + + { + long id = 43; + int atime = 421; + int ctime = 4343; + int mtime = 2; + long size = Long.MAX_VALUE; + short linkcount = 1; + int epoch = 0; + int issuedEpoch = 0; + boolean readonly = false; + String owner = "fdsa"; + String group = "54"; + String linkTarget = null; + + final BufferBackedACL acl = new BufferBackedACL(new String[] { "me" }, new int[] { 23 }); + final BufferBackedACL acl2 = new BufferBackedACL(new String[] { "342fwa" }, + new int[] { 2435 }); + final BufferBackedStripingPolicy sp = new BufferBackedStripingPolicy("RAID0", 32, 99999); + final BufferBackedStripingPolicy sp2 = new BufferBackedStripingPolicy("543gfsa", 111, + 95472); + final BufferBackedXLoc[] xloc = new BufferBackedXLoc[] { + new BufferBackedXLoc(sp, new String[] { "fasd", "fasd", + "http://faksfljdasdkjfjkads.dfd" }), + new BufferBackedXLoc(sp, new String[] { "fasd", "fasd", "http://7413121.com" }) }; + final BufferBackedXLoc[] xloc2 = new BufferBackedXLoc[0]; + final BufferBackedXLocList xlocList = new BufferBackedXLocList(xloc, 43); + final BufferBackedXLocList xlocList2 = new BufferBackedXLocList(xloc2, 1); + final BufferBackedXAttrs xattrs = new BufferBackedXAttrs(new String[] { "attr1", + "attr2" }, new String[] { "value1", "" }, new String[] { "me", "" }); + final BufferBackedXAttrs xattrs2 = new 
BufferBackedXAttrs(new String[0], new String[0], + new String[0]); + + // create file object + BufferBackedFileObject fileObj1 = new BufferBackedFileObject(id, atime, ctime, mtime, + size, linkcount, epoch, issuedEpoch, readonly, owner, group, acl, xlocList, sp, + linkTarget, xattrs); + checkFileObject(id, atime, ctime, mtime, size, linkcount, epoch, issuedEpoch, readonly, + owner, group, linkTarget, acl, xlocList, sp, xattrs, fileObj1); + + // copy file object + BufferBackedFileObject fileObj2 = new BufferBackedFileObject(fileObj1.getBuffer(), + true, true); + checkFileObject(id, atime, ctime, mtime, size, linkcount, epoch, issuedEpoch, readonly, + owner, group, linkTarget, acl, xlocList, sp, xattrs, fileObj2); + + BufferBackedFileObject fileObj3 = new BufferBackedFileObject(fileObj2.getId(), fileObj2 + .getAtime(), fileObj2.getCtime(), fileObj2.getMtime(), fileObj2.getSize(), + fileObj2.getLinkCount(), fileObj2.getEpoch(), fileObj2.getIssuedEpoch(), fileObj2 + .isReadOnly(), fileObj2.getOwnerId().toString(), fileObj2 + .getOwningGroupId().toString(), fileObj2.getAcl(), fileObj2.getXLocList(), + fileObj2.getStripingPolicy(), null, fileObj2.getXAttrs()); + checkFileObject(id, atime, ctime, mtime, size, linkcount, epoch, issuedEpoch, readonly, + owner, group, linkTarget, acl, xlocList, sp, xattrs, fileObj3); + + id = 8488484; + atime = 422342343; + ctime = 452156; + mtime = 44; + size = 73299; + linkcount = 32; + epoch = 5; + issuedEpoch = 5; + readonly = true; + owner = "fasdfsad"; + group = "gdgfd"; + linkTarget = "43eagaasfdfdasdfg"; + + fileObj3.setId(id); + fileObj3.setAtime(atime); + fileObj3.setCtime(ctime); + fileObj3.setMtime(mtime); + fileObj3.setSize(size); + fileObj3.setLinkCount(linkcount); + fileObj3.setEpoch(epoch); + fileObj3.setIssuedEpoch(issuedEpoch); + fileObj3.setReadOnly(readonly); + fileObj3.setOwnerId(owner); + fileObj3.setOwningGroupId(group); + fileObj3.setLinkTarget(linkTarget); + fileObj3.setACL(acl2); + fileObj3.setStripingPolicy(sp2); + 
fileObj3.setXAttrs(xattrs2); + fileObj3.setXLocList(xlocList2); + checkFileObject(id, atime, ctime, mtime, size, linkcount, epoch, issuedEpoch, readonly, + owner, group, linkTarget, acl2, xlocList2, sp2, xattrs2, fileObj3); + + fileObj1.destroy(); + fileObj2.destroy(); + fileObj3.destroy(); + + xattrs.destroy(); + xattrs2.destroy(); + sp.destroy(); + sp2.destroy(); + acl.destroy(); + acl2.destroy(); + for (XLoc loc : xloc) + loc.destroy(); + xlocList.destroy(); + for (XLoc loc : xloc2) + loc.destroy(); + xlocList2.destroy(); + } + } + + private void checkACL(List entities, List rights, ACL acl) { + + ACL.Entry[] entries = new ACL.Entry[entities.size()]; + for (int i = 0; i < entities.size(); i++) + entries[i] = new BufferBackedACL.Entry(entities.get(i), rights.get(i)); + + assertEquals(entities.size(), acl.getEntryCount()); + for (ACL.Entry entry : entries) + assertEquals(entry.getRights(), acl.getRights(entry.getEntity()).intValue()); + } + + private void checkSP(String pattern, int stripeSize, int width, BufferBackedStripingPolicy sp) { + assertEquals(pattern, sp.getPattern().toString()); + assertEquals(width, sp.getWidth()); + assertEquals(stripeSize, sp.getStripeSize()); + } + + private void checkXAttrs(List keys, List values, List uids, + BufferBackedXAttrs xattrs) { + + XAttrs.Entry[] entries = new XAttrs.Entry[keys.size()]; + for (int i = 0; i < keys.size(); i++) + entries[i] = new BufferBackedXAttrs.Entry(keys.get(i), uids.get(i), values.get(i)); + + assertEquals(entries.length, xattrs.getEntryCount()); + for (XAttrs.Entry entry : entries) + assertEquals(entry.getValue(), xattrs.getValue(entry.getKey(), entry.getUID()) + .toString()); + } + + private void checkXLoc(String[] osds, StripingPolicy sp, BufferBackedXLoc xloc) { + + final StripingPolicy xlocSP = xloc.getStripingPolicy(); + + assertEquals(sp.toString(), xlocSP.toString()); + assertEquals(sp.getPattern(), xlocSP.getPattern()); + assertEquals(sp.getWidth(), xlocSP.getWidth()); + 
assertEquals(sp.getStripeSize(), xlocSP.getStripeSize()); + + assertEquals(osds.length, xloc.getOSDCount()); + for (int i = 0; i < osds.length; i++) + assertEquals(osds[i], xloc.getOSD(i).toString()); + } + + private void checkXLocList(List replicas, int version, + BufferBackedXLocList xlocList) { + + assertEquals(version, xlocList.getVersion()); + assertEquals(replicas.size(), xlocList.getReplicaCount()); + + for (int i = 0; i < replicas.size(); i++) + assertEquals(replicas.get(i).toString(), xlocList.getReplica(i).toString()); + } + + private void checkDirObject(long id, int atime, int ctime, int mtime, String owner, + String group, String linkTarget, ACL acl, BufferBackedStripingPolicy defaultSP, + BufferBackedXAttrs xattrs, FSObject obj) { + + assertEquals(id, obj.getId()); + assertEquals(atime, obj.getAtime()); + assertEquals(ctime, obj.getCtime()); + assertEquals(mtime, obj.getMtime()); + assertEquals(owner, obj.getOwnerId().toString()); + assertEquals(group, obj.getOwningGroupId().toString()); + + if (linkTarget == null) + assertNull(obj.getLinkTarget()); + else + assertEquals(linkTarget, obj.getLinkTarget().toString()); + + // check the ACL + ACL objACL = obj.getAcl(); + if (acl == null) + assertNull(objACL); + else + assertEquals(acl.toString(), objACL.toString()); + + // check the default striping policy + StripingPolicy objSP = obj.getStripingPolicy(); + if (defaultSP == null) + assertNull(objSP); + else + assertEquals(defaultSP.toString(), objSP.toString()); + + // check the XAttrs list + XAttrs objxattrs = obj.getXAttrs(); + if (xattrs == null) + assertNull(objxattrs); + else + assertEquals(xattrs.toString(), objxattrs.toString()); + + } + + private void checkFileObject(long id, int atime, int ctime, int mtime, long size, + short linkcount, int epoch, int issuedEpoch, boolean readonly, String owner, String group, + String linkTarget, ACL acl, XLocList xlocList, BufferBackedStripingPolicy sp, + BufferBackedXAttrs xattrs, FileObject obj) { + + 
assertEquals(id, obj.getId()); + assertEquals(atime, obj.getAtime()); + assertEquals(ctime, obj.getCtime()); + assertEquals(mtime, obj.getMtime()); + assertEquals(size, obj.getSize()); + assertEquals(linkcount, obj.getLinkCount()); + assertEquals(epoch, obj.getEpoch()); + assertEquals(issuedEpoch, obj.getIssuedEpoch()); + assertEquals(readonly, obj.isReadOnly()); + assertEquals(owner, obj.getOwnerId().toString()); + assertEquals(group, obj.getOwningGroupId().toString()); + + if (linkTarget == null) + assertNull(obj.getLinkTarget()); + else + assertEquals(linkTarget, obj.getLinkTarget().toString()); + + // check the ACL + ACL objACL = obj.getAcl(); + if (acl == null) + assertNull(objACL); + else + assertEquals(acl.toString(), objACL.toString()); + + // check the XLocList + XLocList xlocObj = obj.getXLocList(); + if (xlocObj == null) + assertNull(xlocObj); + else + assertEquals(xlocList.toString(), xlocObj.toString()); + + // check the striping policy + StripingPolicy objSP = obj.getStripingPolicy(); + if (sp == null) + assertNull(objSP); + else + assertEquals(sp.toString(), objSP.toString()); + + // check the XAttrs list + XAttrs objxattrs = obj.getXAttrs(); + if (xattrs == null) + assertNull(objxattrs); + else + assertEquals(xattrs.toString(), objxattrs.toString()); + + } + + public static void main(String[] args) { + TestRunner.run(BufferBackedMetadataTest.class); + } + + private List generateStrList(String... arr) { + List list = new ArrayList(arr.length); + for (String s : arr) + list.add(s); + + return list; + } + + private List generateIntList(int... arr) { + List list = new ArrayList(arr.length); + for (int s : arr) + list.add(s); + + return list; + } + + public List generateXLocList(BufferBackedXLoc... arr) { + List list = new ArrayList(arr.length); + for (BufferBackedXLoc x : arr) + list.add(x); + + return list; + } + + public List generateSPList(BufferBackedStripingPolicy... 
arr) { + List list = new ArrayList( + arr.length); + for (BufferBackedStripingPolicy s : arr) + list.add(s); + + return list; + } + + private String[] toArray(List list) { + return list.toArray(new String[list.size()]); + } + + private BufferBackedXLoc[] toArray(List list) { + return list.toArray(new BufferBackedXLoc[list.size()]); + } + + private int[] toArray(List list) { + int[] ints = new int[list.size()]; + for (int i = 0; i < ints.length; i++) + ints[i] = list.get(i); + + return ints; + } + +} diff --git a/servers/test/org/xtreemfs/test/mrc/DiskLoggerTest.java b/servers/test/org/xtreemfs/test/mrc/DiskLoggerTest.java new file mode 100644 index 0000000000000000000000000000000000000000..37a14a952e4b8a32afc2e5c557d6133ec1c5f247 --- /dev/null +++ b/servers/test/org/xtreemfs/test/mrc/DiskLoggerTest.java @@ -0,0 +1,128 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.mrc; + +import junit.framework.*; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.mrc.brain.storage.DiskLogger; +import org.xtreemfs.mrc.brain.storage.LogEntry; +import org.xtreemfs.mrc.brain.storage.SliceID; +import org.xtreemfs.mrc.brain.storage.SyncListener; +import org.xtreemfs.test.SetupUtils; + +/** + * + * @author bjko + */ +public class DiskLoggerTest extends TestCase { + + public static boolean success; + + private DiskLogger dl; + + public DiskLoggerTest(String testName) { + super(testName); + Logging.start(SetupUtils.DEBUG_LEVEL); + } + + protected void setUp() throws Exception { + System.out.println("TEST: " + getClass().getSimpleName() + "." + getName()); + + dl = new DiskLogger("/tmp/testlog.mrc",false); + dl.start(); + } + + protected void tearDown() throws Exception { + dl.shutdown(); + } + + public void testSliceID() throws Exception { + + SliceID si1 = new SliceID(1); + SliceID si2 = new SliceID(si1,1); + + assertEquals(si1.toString().length(),SliceID.SIZE_IN_BYTES*2); + + assertEquals(si1,si2); + + String srs1 = si1.toString(); + SliceID si3 = new SliceID(srs1); + + assertEquals(si1,si3); + + ReusableBuffer buf = BufferPool.allocate(SliceID.SIZE_IN_BYTES); + si1.write(buf); + buf.position(0); + si3 = new SliceID(buf); + assertEquals(si1,si3); + BufferPool.free(buf); + } + + public void testMarshalling() throws Exception { + + LogEntry e = new LogEntry(0xFF11, 0xFFAA7700, new SliceID(1), + (byte) 1, "test", "", "", + ReusableBuffer.wrap(new byte[]{1,3,2}), null); + + ReusableBuffer me = e.marshall(); + + LogEntry cmp = new LogEntry(me); + + assertTrue(e.equals(cmp)); + cmp.payload.position(0); + assertTrue(cmp.payload.hasRemaining()); + assertEquals(cmp.payload.get(),(byte)1); + assertEquals(cmp.payload.get(),(byte)3); + 
assertEquals(cmp.payload.get(),(byte)2); + + BufferPool.free(me); + } + + public void testLog() throws Exception { + + LogEntry e = new LogEntry(0xFF11, 0xFFAA7700, new SliceID(1), + (byte) 1, "test", "", "", + ReusableBuffer.wrap(new byte[]{1,3,2}), null); + + success = false; + e.registerListener(new SyncListener() { + public void failed(LogEntry entry, Exception ex) { + fail("Sync failed:"+ex); + } + public void synced(LogEntry entry) { + success = true; + } + } + ); + dl.append(e); + synchronized (this) { + this.wait(500); + } + assertTrue(success); + } + +} diff --git a/servers/test/org/xtreemfs/test/mrc/LogReplayTest.java b/servers/test/org/xtreemfs/test/mrc/LogReplayTest.java new file mode 100644 index 0000000000000000000000000000000000000000..d0c3d7713359ae3d0f8930b972f0081caa28dea7 --- /dev/null +++ b/servers/test/org/xtreemfs/test/mrc/LogReplayTest.java @@ -0,0 +1,155 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
*/
/*
 * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
 */

package org.xtreemfs.test.mrc;

import java.io.File;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.Map;

import junit.framework.TestCase;
import junit.textui.TestRunner;

import org.xtreemfs.common.auth.NullAuthProvider;
import org.xtreemfs.common.clients.mrc.MRCClient;
import org.xtreemfs.common.logging.Logging;
import org.xtreemfs.common.util.FSUtils;
import org.xtreemfs.dir.DIRConfig;
import org.xtreemfs.mrc.MRCConfig;
import org.xtreemfs.mrc.RequestController;
import org.xtreemfs.osd.OSD;
import org.xtreemfs.osd.OSDConfig;
import org.xtreemfs.test.SetupUtils;

/**
 * Tests that an MRC killed without a clean shutdown restores its state from
 * the operations log on restart.
 *
 * @author bjko
 */
public class LogReplayTest extends TestCase {

    // NOTE(review): not referenced anywhere in this class
    private static final boolean DEBUG = false;

    private static final String TEST_DIR = "/tmp/xtreemfs-test";

    private RequestController mrc1;

    private org.xtreemfs.dir.RequestController dirService;

    private MRCClient client;

    private MRCConfig mrcCfg1;

    private DIRConfig dsCfg;

    private OSDConfig osdConfig;

    private OSD osd;

    private InetSocketAddress mrc1Address;

    // starts a DIR service, one OSD and an MRC on a clean test directory
    protected void setUp() throws Exception {

        System.out.println("TEST: " + getClass().getSimpleName() + "."
            + getName());

        dsCfg = SetupUtils.createDIRConfig();

        mrcCfg1 = SetupUtils.createMRC1Config();
        mrc1Address = SetupUtils.getMRC1Addr();

        osdConfig = SetupUtils.createOSD1Config();

        // cleanup
        File testDir = new File(TEST_DIR);

        FSUtils.delTree(testDir);
        testDir.mkdirs();

        // start services
        dirService = new org.xtreemfs.dir.RequestController(dsCfg);
        dirService.startup();

        osd = new OSD(osdConfig);

        mrc1 = new RequestController(mrcCfg1);
        mrc1.startup();

        client = SetupUtils.createMRCClient(10000);
    }

    public LogReplayTest(String testName) {
        super(testName);
        Logging.start(SetupUtils.DEBUG_LEVEL);
    }

    protected void tearDown() throws Exception {
        client.shutdown();
        mrc1.shutdown();
        dirService.shutdown();
        osd.shutdown();

        client.waitForShutdown();
    }

    /**
     * Creates a volume, a directory and a file, updates the file size, kills
     * the MRC without a clean shutdown (dropDead), restarts it and verifies
     * that the replayed log restores both the namespace and the file size.
     */
    public void testReplay() throws Exception {

        final String authString = NullAuthProvider.createAuthString("someUser", MRCClient.generateStringList("someGroup"));

        // create a volume, directory and file
        client.createVolume(mrc1Address, "testVolumeREPL", authString);
        client.createDir(mrc1Address, "testVolumeREPL/bla", authString);
        client.createFile(mrc1Address, "testVolumeREPL/bla/yabba", authString);

        // open the file and update its file size
        Map xcap = client.open(mrc1Address,
            "testVolumeREPL/bla/yabba", "r", authString);
        client.updateFileSize(mrc1Address, xcap.get("X-Capability"),
            "[1024,1]", authString);
        // simulate a crash: no clean shutdown, so the restarted MRC must
        // rebuild its state by replaying the operations log
        mrc1.dropDead();

        synchronized (this) {
            try {
                this.wait(2000);
            } catch (InterruptedException ex) {
                ex.printStackTrace();
            }
        }

        mrc1 = new RequestController(mrcCfg1);
        mrc1.startup();

        // the directory must still contain exactly the one file created above
        List dir = client.readDir(mrc1Address, "testVolumeREPL/bla",
            authString);
        assertTrue(dir.size() == 1);
        assertTrue(dir.get(0).equals("yabba"));
        // the replayed file size update must be visible, too
        Map statInfo = client.stat(mrc1Address, "testVolumeREPL/bla/yabba", false, false, false, authString);
        assertEquals(1024L, statInfo.get("size"));
    }

    public static void main(String[] args) {
        TestRunner.run(LogReplayTest.class);
    }

}
diff --git a/servers/test/org/xtreemfs/test/mrc/MRCTest.java b/servers/test/org/xtreemfs/test/mrc/MRCTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..43598744c18f24d1a4765abf1394ba33a610e35e
--- /dev/null
+++ b/servers/test/org/xtreemfs/test/mrc/MRCTest.java
@@ -0,0 +1,1250 @@
/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.

   This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
   Grid Operating System, see for more details.
   The XtreemOS project has been developed with the financial support of the
   European Commission's IST program under contract #FP6-033576.

   XtreemFS is free software: you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation, either version 2 of the License, or (at your option)
   any later version.

   XtreemFS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with XtreemFS. If not, see .
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.mrc; + +import java.io.File; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.clients.RPCClient; +import org.xtreemfs.common.clients.mrc.MRCClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.dir.DIRConfig; +import org.xtreemfs.foundation.json.JSONParser; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.mrc.MRCConfig; +import org.xtreemfs.mrc.RequestController; +import org.xtreemfs.mrc.ac.POSIXFileAccessPolicy; +import org.xtreemfs.mrc.ac.VolumeACLFileAccessPolicy; +import org.xtreemfs.mrc.ac.YesToAnyoneFileAccessPolicy; +import org.xtreemfs.mrc.osdselection.RandomSelectionPolicy; +import org.xtreemfs.mrc.slices.DefaultPartitioningPolicy; +import org.xtreemfs.osd.OSD; +import org.xtreemfs.osd.OSDConfig; +import org.xtreemfs.test.SetupUtils; + +/** + * XtreemFS integration test case. + * + * @author stender + */ +public class MRCTest extends TestCase { + + private RequestController mrc1; + + private RequestController mrc2; + + private org.xtreemfs.dir.RequestController dirService; + + private MRCClient client; + + private MRCConfig mrcCfg1; + + private MRCConfig mrcCfg2; + + private OSDConfig osdConfig; + + private DIRConfig dsCfg; + + private OSD osd; + + private InetSocketAddress mrc1Address; + + private InetSocketAddress mrc2Address; + + public MRCTest() { + Logging.start(SetupUtils.DEBUG_LEVEL); + } + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + + dsCfg = SetupUtils.createDIRConfig(); + + mrcCfg1 = SetupUtils.createMRC1Config(); + mrc1Address = SetupUtils.getMRC1Addr(); + + mrcCfg2 = SetupUtils.createMRC2Config(); + mrc2Address = SetupUtils.getMRC2Addr(); + + osdConfig = SetupUtils.createOSD1Config(); + + // cleanup + File testDir = new File(SetupUtils.TEST_DIR); + + FSUtils.delTree(testDir); + testDir.mkdirs(); + + // start the Directory Service + dirService = new org.xtreemfs.dir.RequestController(dsCfg); + dirService.startup(); + + // start the OSD + osd = new OSD(osdConfig); + + // start two MRCs + mrc1 = new RequestController(mrcCfg1); + mrc1.startup(); + mrc2 = new RequestController(mrcCfg2); + mrc2.startup(); + + client = SetupUtils.createMRCClient(10000); + } + + protected void tearDown() throws Exception { + + // shut down all services + mrc1.shutdown(); + mrc2.shutdown(); + client.shutdown(); + osd.shutdown(); + dirService.shutdown(); + + client.waitForShutdown(); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, BufferPool.getStatus()); + + } + + public void testCreateDelete() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient + .generateStringList("groupZ")); + final String rootAuthString = NullAuthProvider.createAuthString("root", MRCClient + .generateStringList("root")); + final String volumeName = "testVolume"; + + // create and delete a volume + client.createVolume(mrc1Address, volumeName, authString); + Map localVols = client.getLocalVolumes(mrc1Address, authString); + assertEquals(1, localVols.size()); + assertEquals(volumeName, localVols.values().iterator().next()); + client.deleteVolume(mrc1Address, volumeName, authString); + localVols = client.getLocalVolumes(mrc1Address, authString); + assertEquals(0, localVols.size()); + + // create a volume (no access control) + client.createVolume(mrc1Address, volumeName, authString); + + // create some files and directories + client.createDir(mrc1Address, volumeName + "/myDir", 
authString); + client.createDir(mrc1Address, volumeName + "/anotherDir", authString); + + for (int i = 0; i < 10; i++) + client.createFile(mrc1Address, volumeName + "/myDir/test" + i + ".txt", authString); + + try { + client.createFile(mrc1Address, volumeName, authString); + fail("missing filename"); + } catch (Exception exc) { + } + + try { + client.createFile(mrc1Address, volumeName + "/myDir/test0.txt", authString); + fail("duplicate file creation"); + } catch (Exception exc) { + } + + try { + client.createFile(mrc1Address, volumeName + "/myDir/test0.txt/bla.txt", authString); + fail("file in file creation"); + } catch (Exception exc) { + } + + // test 'readDir' and 'stat' + + List list = client.readDir(mrc1Address, volumeName, authString); + assertEquals(list.size(), 2); + list = client.readDir(mrc1Address, volumeName + "/myDir", authString); + assertEquals(list.size(), 10); + + Map statInfo = client.stat(mrc1Address, volumeName + "/myDir/test2.txt", + true, true, true, authString); + assertNotNull(statInfo.get("fileId")); + assertEquals(statInfo.get("ownerId"), "userXY"); + assertEquals(statInfo.get("objType").toString(), "1"); + assertEquals(statInfo.get("size").toString(), "0"); + assertTrue(((Long) statInfo.get("ctime")) > 0); + assertEquals(statInfo.get("posixAccessMode").toString(), "511"); + assertEquals(statInfo.get("linkTarget"), null); + + // test 'delete' + + client.delete(mrc1Address, volumeName + "/myDir/test3.txt", authString); + client.delete(mrc1Address, volumeName + "/anotherDir", authString); + + // test 'init' + client.initFileSystem(mrc1Address, rootAuthString); + client.createVolume(mrc1Address, volumeName, authString); + assertEquals(client.readDir(mrc1Address, volumeName, authString).size(), 0); + Map capability = client.createFile(mrc1Address, volumeName + "/test.txt", + null, null, 511L, true, authString); + assertNotNull(capability.get("X-Locations")); + assertNotNull(capability.get("X-Capability")); + } + + public void 
testUserAttributes() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient + .generateStringList("groupZ")); + final String volumeName = "testVolume"; + final long accessMode = 511; // rwxrwxrwx + + client.createVolume(mrc1Address, volumeName, authString); + + // add and delete some user attributes to files + + Map attrs = new HashMap(); + attrs.put("key1", "quark"); + attrs.put("key2", "quatsch"); + client.createFile(mrc1Address, volumeName + "/test.txt", attrs, null, accessMode, + authString); + + Map attrs2 = new HashMap(); + attrs2.put("myAttr", "171"); + attrs2.put("key1", "blub"); + client.setXAttrs(mrc1Address, volumeName + "/test.txt", attrs2, authString); + + Map attrs3 = (Map) client.stat(mrc1Address, + volumeName + "/test.txt", false, true, false, authString).get("xAttrs"); + assertEquals("171", attrs3.get("myAttr")); + + String val = client.getXAttr(mrc1Address, volumeName + "/test.txt", "key1", authString); + assertEquals("blub", val); + + client.createFile(mrc1Address, volumeName + "/test2.txt", authString); + client.setXAttrs(mrc1Address, volumeName + "/test2.txt", attrs, authString); + + List keys = new ArrayList(1); + keys.add("key2"); + client.removeXAttrs(mrc1Address, volumeName + "/test2.txt", keys, authString); + attrs3 = (Map) client.stat(mrc1Address, volumeName + "/test2.txt", false, + true, false, authString).get("xAttrs"); + assertEquals("quark", attrs3.get("key1")); + + keys.add("key1"); + client.removeXAttrs(mrc1Address, volumeName + "/test2.txt", keys, authString); + attrs3 = (Map) client.stat(mrc1Address, volumeName + "/test2.txt", false, + true, false, authString).get("xAttrs"); + assertNull(attrs3.get("key1")); + + client.removeXAttrs(mrc1Address, volumeName + "/test.txt", new ArrayList(attrs + .keySet()), authString); + attrs3 = (Map) client.stat(mrc1Address, volumeName + "/test2.txt", false, + true, false, authString).get("xAttrs"); + assertNull(attrs3.get("key1")); + + String sysAttr 
= client.getXAttr(mrc1Address, volumeName + "/test.txt", + "xtreemfs.object_type", authString); + assertEquals("1", sysAttr); + } + + public void testSymlink() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient + .generateStringList("groupZ")); + final String volumeName = "testVolume"; + + client.createVolume(mrc1Address, volumeName, authString); + client.createFile(mrc1Address, volumeName + "/test.txt", authString); + + // create and test a symbolic link + + client.createSymbolicLink(mrc1Address, volumeName + "/testAlias.txt", volumeName + + "/test.txt", authString); + Map statInfo = client.stat(mrc1Address, volumeName + "/testAlias.txt", + false, false, false, authString); + assertEquals(statInfo.get("objType"), 3L); + assertEquals(statInfo.get("linkTarget"), volumeName + "/test.txt"); + } + + public void testHardLink() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient + .generateStringList("groupZ")); + final String volumeName = "testVolume"; + + client.createVolume(mrc1Address, volumeName, authString); + + // create a new file + client.createFile(mrc1Address, volumeName + "/test1.txt", authString); + + // create a new link to the file + client.createLink(mrc1Address, volumeName + "/test2.txt", volumeName + "/test1.txt", + authString); + + // check whether both links refer to the same file + Map statInfo1 = client.stat(mrc1Address, volumeName + "/test1.txt", false, + false, false, authString); + Map statInfo2 = client.stat(mrc1Address, volumeName + "/test2.txt", false, + false, false, authString); + assertEquals(statInfo1.get("fileId"), statInfo2.get("fileId")); + assertEquals(2l, statInfo1.get("linkCount")); + + // delete both files + client.delete(mrc1Address, volumeName + "/test1.txt", authString); + assertEquals(1l, client.stat(mrc1Address, volumeName + "/test2.txt", false, false, false, + authString).get("linkCount")); + client.delete(mrc1Address, 
volumeName + "/test2.txt", authString); + + try { + client.stat(mrc1Address, volumeName + "/test1.txt", false, false, false, authString); + fail("file should not exist anymore"); + } catch (Exception exc) { + } + + try { + client.stat(mrc1Address, volumeName + "/test2.txt", false, false, false, authString); + fail("file should not exist anymore"); + } catch (Exception exc) { + } + + // create two links to a directory + client.createDir(mrc1Address, volumeName + "/testDir1", authString); + try { + client.createLink(mrc1Address, volumeName + "/testDir1/testDir2", volumeName + + "/testDir1", authString); + fail("links to directories should not be allowed"); + } catch (Exception exc) { + } + } + + public void testReplicas() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient + .generateStringList("groupZ")); + final String volumeName = "testVolume"; + final long accessMode = 511; // rwxrwxrwx + + client.createVolume(mrc1Address, volumeName, RandomSelectionPolicy.POLICY_ID, + getDefaultStripingPolicy(), POSIXFileAccessPolicy.POLICY_ID, + DefaultPartitioningPolicy.POLICY_ID, null, authString); + client.createFile(mrc1Address, volumeName + "/test.txt", authString); + Map attrs = RPCClient.generateMap("xtreemfs.read_only", true); + client.setXAttrs(mrc1Address, volumeName + "/test.txt", attrs, authString); + + // test adding and retrieval of replicas + Map statInfo = client.stat(mrc1Address, volumeName + "/test.txt", true, + false, false, authString); + String globalFileId = (String) statInfo.get("fileId"); + assertNotNull(globalFileId); + assertNull(statInfo.get("replicas")); + + List osdList = new ArrayList(); + osdList.add("177.127.77.90:7477"); + client.addReplica(mrc1Address, globalFileId, null, osdList, authString); + statInfo = client + .stat(mrc1Address, volumeName + "/test.txt", true, false, true, authString); + + assertEquals(((List) ((List) ((List) statInfo.get("replicas")) + .get(0)).get(0)).size(), 2); + 
assertEquals(((List) ((List) ((List) ((List) statInfo + .get("replicas")).get(0)).get(0)).get(1)).get(0), "177.127.77.90:7477"); + } + + public void testOpen() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient + .generateStringList("groupZ")); + final String volumeName = "testVolume"; + + client.createVolume(mrc1Address, volumeName, RandomSelectionPolicy.POLICY_ID, + getDefaultStripingPolicy(), POSIXFileAccessPolicy.POLICY_ID, + DefaultPartitioningPolicy.POLICY_ID, null, authString); + client.createFile(mrc1Address, volumeName + "/test.txt", authString); + + // test capabilities + Map capability = client.open(mrc1Address, volumeName + "/test.txt", "c", + authString); + assertNotNull(capability.get("X-Locations")); + assertNotNull(capability.get("X-Capability")); + + capability = client.open(mrc1Address, volumeName + "/test.txt", "w", authString); + assertNotNull(capability.get("X-Capability")); + assertNotNull(capability.get("X-Locations")); + + Map statInfo = client.stat(mrc1Address, volumeName + "/test.txt", true, + false, true, authString); + Map acl = (Map) statInfo.get("acl"); + acl.put("user::", 128); // sr + client.createFile(mrc1Address, volumeName + "/test2.txt", null, getDefaultStripingPolicy(), + 0, authString); + client.setACLEntries(mrc1Address, volumeName + "/test2.txt", acl, authString); + + capability = client.open(mrc1Address, volumeName + "/test2.txt", "sr", authString); + assertNotNull(capability.get("X-Locations")); + assertNotNull(capability.get("X-Capability")); + + capability = client.open(mrc1Address, volumeName + "/test2.txt", "r", authString); + assertNull(capability.get("X-Locations")); + assertNull(capability.get("X-Capability")); + + // symlinks and directories ... 
+ client.createDir(mrc1Address, volumeName + "/dir", authString); + client.createSymbolicLink(mrc1Address, volumeName + "/link", volumeName + "/test2.txt", + authString); + client.createSymbolicLink(mrc1Address, volumeName + "/link2", "somewhere", authString); + + try { + client.open(mrc1Address, volumeName + "/dir", "sr", authString); + fail("opened directory"); + } catch (Exception exc) { + } + + capability = client.open(mrc1Address, volumeName + "/link", "sr", authString); + assertNotNull(capability.get(HTTPHeaders.HDR_XLOCATIONS)); + + String xCapStr = (String) capability.get(HTTPHeaders.HDR_XCAPABILITY); + assertNotNull(xCapStr); + List xCap = (List) JSONParser.parseJSON(new JSONString(xCapStr)); + + // wait one second before renewing the capability + Thread.sleep(1000); + + // test renewing a capability + Map newCapability = client.renew(mrc1Address, capability, authString); + + String newXCapStr = (String) newCapability.get(HTTPHeaders.HDR_XCAPABILITY); + assertNotNull(newXCapStr); + List newXCap = (List) JSONParser.parseJSON(new JSONString(newXCapStr)); + + assertEquals(xCap.get(0), newXCap.get(0)); + assertEquals(xCap.get(1), newXCap.get(1)); + assertTrue((Long) xCap.get(2) < (Long) newXCap.get(2)); + assertEquals(xCap.get(3), newXCap.get(3)); + assertEquals(xCap.get(4), newXCap.get(4)); + assertFalse(xCap.get(5).equals(newXCap.get(5))); + + try { + capability = client.open(mrc1Address, volumeName + "/link2", "r", authString); + fail("should have been redirected"); + } catch (Exception exc) { + } + + } + + public void testLocalMove() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient + .generateStringList("groupZ")); + final String volumeName = "testVolume"; + + client.createVolume(mrc1Address, volumeName, authString); + + client.createFile(mrc1Address, volumeName + "/test.txt", authString); + client.createFile(mrc1Address, volumeName + "/blub.txt", authString); + client.createDir(mrc1Address, volumeName 
+ "/mainDir", authString); + client.createDir(mrc1Address, volumeName + "/mainDir/subDir", authString); + client.createDir(mrc1Address, volumeName + "/mainDir/subDir/newDir", authString); + + assertTree(mrc1Address, authString, volumeName + "/test.txt", volumeName + "/blub.txt", + volumeName + "/mainDir", volumeName + "/mainDir/subDir", volumeName + + "/mainDir/subDir/newDir"); + + // move some files and directories + + // file -> none (create w/ different name) + client.move(mrc1Address, volumeName + "/test.txt", volumeName + "/mainDir/bla.txt", + authString); + assertTree(mrc1Address, authString, volumeName + "/mainDir/bla.txt", volumeName + + "/blub.txt", volumeName + "/mainDir", volumeName + "/mainDir/subDir", volumeName + + "/mainDir/subDir/newDir"); + + // file -> file (overwrite) + client.move(mrc1Address, volumeName + "/mainDir/bla.txt", volumeName + "/blub.txt", + authString); + assertTree(mrc1Address, authString, volumeName + "/blub.txt", volumeName + "/mainDir", + volumeName + "/mainDir/subDir", volumeName + "/mainDir/subDir/newDir"); + + // file -> none (create w/ same name) + client.move(mrc1Address, volumeName + "/blub.txt", volumeName + "/mainDir/blub.txt", + authString); + assertTree(mrc1Address, authString, volumeName + "/mainDir/blub.txt", volumeName + + "/mainDir", volumeName + "/mainDir/subDir", volumeName + "/mainDir/subDir/newDir"); + + // file -> dir (invalid operation) + try { + client.move(mrc1Address, volumeName + "/mainDir/blub.txt", volumeName + + "/mainDir/subDir", authString); + fail("move file -> directory should not be possible"); + } catch (Exception exc) { + } + + // file -> file (same path, should have no effect) + client.move(mrc1Address, volumeName + "/mainDir/blub.txt", + volumeName + "/mainDir/blub.txt", authString); + assertTree(mrc1Address, authString, volumeName + "/mainDir/blub.txt", volumeName + + "/mainDir", volumeName + "/mainDir/subDir", volumeName + "/mainDir/subDir/newDir"); + + // file -> file (same directory) + 
client.move(mrc1Address, volumeName + "/mainDir/blub.txt", volumeName + + "/mainDir/blub2.txt", authString); + assertTree(mrc1Address, authString, volumeName + "/mainDir/blub2.txt", volumeName + + "/mainDir", volumeName + "/mainDir/subDir", volumeName + "/mainDir/subDir/newDir"); + + // dir -> none (create w/ same name) + client + .move(mrc1Address, volumeName + "/mainDir/subDir", volumeName + "/subDir", + authString); + assertTree(mrc1Address, authString, volumeName + "/mainDir/blub2.txt", volumeName + + "/mainDir", volumeName + "/subDir", volumeName + "/subDir/newDir"); + + // dir -> dir (overwrite, should fail because of non-empty subdirectory) + try { + client.move(mrc1Address, volumeName + "/subDir", volumeName + "/mainDir", authString); + fail("moved directory to non-empty directory"); + } catch (Exception exc) { + } + + // dir -> dir (overwrite) + client.delete(mrc1Address, volumeName + "/mainDir/blub2.txt", authString); + client.move(mrc1Address, volumeName + "/subDir", volumeName + "/mainDir", authString); + assertTree(mrc1Address, authString, volumeName + "/mainDir", volumeName + "/mainDir/newDir"); + + // dir -> volume (should fail because volume can't be overwritten) + try { + client.move(mrc1Address, volumeName + "/mainDir/newDir", volumeName, authString); + fail("move overwrote volume"); + } catch (Exception exc) { + } + + // dir -> invalid volume (should fail) + try { + client.move(mrc1Address, volumeName, "somewhere", authString); + fail("moved to invalid volume"); + } catch (Exception exc) { + } + + assertTree(mrc1Address, authString, volumeName + "/mainDir", volumeName + "/mainDir/newDir"); + } + + // public void testLocalInterVolumeMove() throws Exception { + + // final String authString = MRCClient.createAuthString("nullauth", + // "userXY", MRCClient.generateStringList("groupZ")); + // final String volumeName1 = "testVolume"; + // final String volumeName2 = "testVolume2"; + // final long accessMode = 511; // rwxrwxrwx + // + // // create and 
populate volume 1 + // client.createVolume(mrc1Address, volumeName1, authString); + // + // client.createSymbolicLink(mrc1Address, volumeName1 + "/test.txt", + // "blub", authString); + // client.createFile(mrc1Address, volumeName1 + "/test2.txt", authString); + // + // // create and populate volume 2 + // client.createVolume(mrc1Address, volumeName2, authString); + // + // client.createDir(mrc1Address, volumeName2 + "/testDir", authString); + // client.createDir(mrc1Address, volumeName2 + "/testDir/subDir", + // authString); + // + // // create a file with attributes and a striping policy + // Map xAttrs = new HashMap(); + // xAttrs.put("attr", "value"); + // xAttrs.put("attr2", "value2"); + // Map stripingPolicy = getDefaultStripingPolicy(); + // + // client.createFile(mrc1Address, volumeName2 + "/testDir/blub.txt", + // xAttrs, stripingPolicy, accessMode, authString); + // + // // open the file in order to assign an OSD to it + // Thread.sleep(SHORT_DELAY); + // client.open(mrc1Address, volumeName2 + "/testDir/blub.txt", "sr", + // authString); + // + // assertTree(mrc1Address, authString, volumeName1 + "/test.txt", + // volumeName1 + "/test2.txt", volumeName2 + "/testDir", volumeName2 + // + "/testDir/subDir", volumeName2 + "/testDir/blub.txt"); + // + // // move some files and directories + // + // // file -> none (create w/ new name) + // client.move(mrc1Address, volumeName1 + "/test.txt", volumeName2 + // + "/newTest.txt", authString); + // assertTree(mrc1Address, authString, volumeName1 + "/test2.txt", + // volumeName2 + "/newTest.txt", volumeName2 + "/testDir", volumeName2 + // + "/testDir/subDir", volumeName2 + "/testDir/blub.txt"); + // Map statInfo = client.stat(mrc1Address, volumeName2 + // + "/newTest.txt", false, false, false, authString); + // assertEquals(statInfo.get("linkTarget"), "blub"); + // + // // file -> file (overwrite) + // Map cap = client.move(mrc1Address, volumeName2 + // + "/newTest.txt", volumeName1 + "/test2.txt", authString); + // 
assertTree(mrc1Address, authString, volumeName1 + "/test2.txt", + // volumeName2 + "/testDir", volumeName2 + "/testDir/subDir", + // volumeName2 + "/testDir/blub.txt"); + // assertNotNull(cap); + // + // // file -> none (create w/ same name) + // client.move(mrc1Address, volumeName1 + "/test2.txt", volumeName2 + // + "/testDir/test2.txt", authString); + // assertTree(mrc1Address, authString, volumeName2 + "/testDir/test2.txt", + // volumeName2 + "/testDir", volumeName2 + "/testDir/subDir", + // volumeName2 + "/testDir/blub.txt"); + // + // // file -> dir (should fail) + // try { + // client.move(mrc1Address, volumeName2 + "/testDir/test2.txt", + // volumeName2, authString); + // fail("moved file to directory"); + // } catch (Exception exc) { + // } + // + // try { + // client.move(mrc1Address, volumeName2 + "/testDir/test2.txt", + // volumeName2 + "/testDir/subDir", authString); + // fail("moved file to directory"); + // } catch (Exception exc) { + // } + // + // assertTree(mrc1Address, authString, volumeName2 + "/testDir/test2.txt", + // volumeName2 + "/testDir", volumeName2 + "/testDir/subDir", + // volumeName2 + "/testDir/blub.txt"); + // + // // dir -> none + // client.move(mrc1Address, volumeName2 + "/testDir", volumeName1 + // + "/mainDir", authString); + // assertTree(mrc1Address, authString, volumeName1 + "/mainDir/test2.txt", + // volumeName1 + "/mainDir", volumeName1 + "/mainDir/subDir", + // volumeName1 + "/mainDir/blub.txt"); + // statInfo = client.stat(mrc1Address, volumeName1 + "/mainDir/blub.txt", + // true, true, true, authString); + // xAttrs = (Map) statInfo.get("xAttrs"); + // assertEquals(xAttrs.size(), 2); + // assertEquals(xAttrs.get("attr"), "value"); + // assertEquals(xAttrs.get("attr2"), "value2"); + // + // // dir -> dir (non-empty, should fail) + // + // client.createDir(mrc1Address, volumeName2 + "/someDir", authString); + // client + // .createDir(mrc1Address, volumeName1 + "/someOtherDir", + // authString); + // try { + // 
client.move(mrc1Address, volumeName2 + "/someDir", volumeName1 + // + "/mainDir", authString); + // fail("moved directory to non-empty directory"); + // } catch (Exception exc) { + // } + // + // // dir -> empty dir + // client.move(mrc1Address, volumeName2 + "/someDir", volumeName1 + // + "/someOtherDir", authString); + // + // assertTree(mrc1Address, authString, volumeName1 + "/mainDir/test2.txt", + // volumeName1 + "/mainDir", volumeName1 + "/mainDir/subDir", + // volumeName1 + "/mainDir/blub.txt", volumeName1 + "/someOtherDir"); + // + // // dir -> file (should fail) + // try { + // client.move(mrc1Address, volumeName2 + "/testDir", volumeName1 + // + "/mainDir", authString); + // fail("moved directory to file"); + // } catch (Exception exc) { + // } + // + // // dir -> volume (should fail) + // try { + // client.move(mrc1Address, volumeName1 + "/mainDir", volumeName2, + // authString); + // fail("renamed dir to volume"); + // } catch (Exception exc) { + // } + // + // assertTree(mrc1Address, authString, volumeName1 + "/mainDir/test2.txt", + // volumeName1 + "/mainDir", volumeName1 + "/mainDir/subDir", + // volumeName1 + "/mainDir/blub.txt", volumeName1 + "/someOtherDir"); + // } + // + // public void testRemoteInterVolumeMove() throws Exception { + // + // final String authString = "nullauth userXY groupZ"; + // final String volumeName1 = "testVolume"; + // final String volumeName2 = "testVolume2"; + // final long accessMode = 511; // rwxrwxrwx + // + // // create and populate volume 1 + // client.createVolume(mrc1Address, volumeName1, authString); + // + // client.createSymbolicLink(mrc1Address, volumeName1 + "/test.txt", + // "blub", authString); + // client.createFile(mrc1Address, volumeName1 + "/test2.txt", authString); + // + // // create and populate volume 2 + // client.createVolume(mrc2Address, volumeName2, authString); + // + // client.createDir(mrc2Address, volumeName2 + "/testDir", authString); + // client.createDir(mrc2Address, volumeName2 + 
"/testDir/subDir", + // authString); + // + // // create a file with attributes and a striping policy + // Map xAttrs = new HashMap(); + // xAttrs.put("attr", "value"); + // xAttrs.put("attr2", "value2"); + // Map stripingPolicy = getDefaultStripingPolicy(); + // Map acl = new HashMap(); + // acl.put("userXY", 90); + // acl.put("userBlub", 3); + // + // client.createFile(mrc2Address, volumeName2 + "/testDir/blub.txt", + // xAttrs, stripingPolicy, accessMode, authString); + // + // // open the file in order to assign an OSD to it + // Thread.sleep(SHORT_DELAY); + // client.open(mrc2Address, volumeName2 + "/testDir/blub.txt", "sr", + // authString); + // + // assertTree(mrc1Address, authString, volumeName1 + "/test.txt", + // volumeName1 + "/test2.txt"); + // assertTree(mrc2Address, authString, volumeName2 + "/testDir", + // volumeName2 + "/testDir/subDir", volumeName2 + "/testDir/blub.txt"); + // + // // move some files and directories + // + // // file -> none (create w/ new name) + // client.move(mrc1Address, volumeName1 + "/test.txt", volumeName2 + // + "/newTest.txt", authString); + // assertTree(mrc1Address, authString, volumeName1 + "/test2.txt"); + // assertTree(mrc2Address, authString, volumeName2 + "/newTest.txt", + // volumeName2 + "/testDir", volumeName2 + "/testDir/subDir", + // volumeName2 + "/testDir/blub.txt"); + // Map statInfo = client.stat(mrc2Address, volumeName2 + // + "/newTest.txt", false, false, false, authString); + // assertEquals(statInfo.get("linkTarget"), "blub"); + // + // // file -> file (overwrite) + // Map map = client.move(mrc2Address, volumeName2 + // + "/newTest.txt", volumeName1 + "/test2.txt", authString); + // assertTree(mrc1Address, authString, volumeName1 + "/test2.txt"); + // assertTree(mrc2Address, authString, volumeName2 + "/testDir", + // volumeName2 + "/testDir/subDir", volumeName2 + "/testDir/blub.txt"); + // assertNotNull(map); + // + // // file -> none (create w/ same name) + // client.move(mrc1Address, volumeName1 + 
"/test2.txt", volumeName2 + // + "/testDir/test2.txt", authString); + // assertTree(mrc1Address, authString); + // assertTree(mrc2Address, authString, volumeName2 + "/testDir/test2.txt", + // volumeName2 + "/testDir", volumeName2 + "/testDir/subDir", + // volumeName2 + "/testDir/blub.txt"); + // + // // file -> dir (should fail) + // try { + // client.move(mrc2Address, volumeName2 + "/testDir/test2.txt", + // volumeName2, authString); + // fail("moved file to volume"); + // } catch (Exception exc) { + // } + // + // try { + // client.move(mrc2Address, volumeName2 + "/testDir/test2.txt", + // volumeName2 + "/testDir/subDir", authString); + // fail("moved file to directory"); + // } catch (Exception exc) { + // } + // + // assertTree(mrc1Address, authString); + // assertTree(mrc2Address, authString, volumeName2 + "/testDir/test2.txt", + // volumeName2 + "/testDir", volumeName2 + "/testDir/subDir", + // volumeName2 + "/testDir/blub.txt"); + // + // // dir -> none + // client.move(mrc2Address, volumeName2 + "/testDir", volumeName1 + // + "/mainDir", authString); + // assertTree(mrc1Address, authString, volumeName1 + "/mainDir/test2.txt", + // volumeName1 + "/mainDir", volumeName1 + "/mainDir/subDir", + // volumeName1 + "/mainDir/blub.txt"); + // assertTree(mrc2Address, authString); + // + // statInfo = client.stat(mrc1Address, volumeName1 + "/mainDir/blub.txt", + // true, true, true, authString); + // xAttrs = (Map) statInfo.get("xAttrs"); + // assertEquals(xAttrs.size(), 2); + // assertEquals(xAttrs.get("attr"), "value"); + // assertEquals(xAttrs.get("attr2"), "value2"); + // assertNotNull(statInfo.get("replicas")); + // + // // dir -> dir (non-empty, should fail) + // + // client.createDir(mrc2Address, volumeName2 + "/someDir", authString); + // client + // .createDir(mrc1Address, volumeName1 + "/someOtherDir", + // authString); + // try { + // client.move(mrc2Address, volumeName2 + "/someDir", volumeName1 + // + "/mainDir", authString); + // fail("moved directory to 
non-empty directory"); + // } catch (Exception exc) { + // } + // + // // dir -> empty dir + // client.move(mrc2Address, volumeName2 + "/someDir", volumeName1 + // + "/someOtherDir", authString); + // + // assertTree(mrc1Address, authString, volumeName1 + "/mainDir/test2.txt", + // volumeName1 + "/mainDir", volumeName1 + "/mainDir/subDir", + // volumeName1 + "/mainDir/blub.txt", volumeName1 + "/someOtherDir"); + // assertTree(mrc2Address, authString); + // + // // dir -> file (should fail) + // try { + // client.move(mrc2Address, volumeName2 + "/testDir", volumeName1 + // + "/mainDir", authString); + // fail("moved directory to file"); + // } catch (Exception exc) { + // } + // + // // dir -> volume (should fail) + // try { + // client.move(mrc1Address, volumeName1 + "/mainDir", volumeName2, + // authString); + // fail("renamed dir to volume"); + // } catch (Exception exc) { + // } + // + // assertTree(mrc1Address, authString, volumeName1 + "/mainDir/test2.txt", + // volumeName1 + "/mainDir", volumeName1 + "/mainDir/subDir", + // volumeName1 + "/mainDir/blub.txt", volumeName1 + "/someOtherDir"); + // assertTree(mrc2Address, authString); + // } + + public void testAccessControl() throws Exception { + + final String authString1 = NullAuthProvider.createAuthString("userXY", MRCClient + .generateStringList("groupZ")); + final String authString2 = NullAuthProvider.createAuthString("userAB", MRCClient + .generateStringList("groupA")); + final String authString3 = NullAuthProvider.createAuthString("userZZ", MRCClient + .generateStringList("groupY")); + final String noACVolumeName = "noACVol"; + final String volACVolumeName = "volACVol"; + final String posixVolName = "acVol"; + + Map acl = new HashMap(); + acl.put("userXY", (1 << 0) | (1 << 1)); // read, write + acl.put("userAB", 1); // read + + // NO ACCESS CONTROL + + // create a volume + client.createVolume(mrc1Address, noACVolumeName, RandomSelectionPolicy.POLICY_ID, null, + YesToAnyoneFileAccessPolicy.POLICY_ID, 
DefaultPartitioningPolicy.POLICY_ID, null, + authString1); + + // test chown + client.createFile(mrc1Address, noACVolumeName + "/chownTestFile", authString1); + client.changeOwner(mrc1Address, noACVolumeName + "/chownTestFile", "newUser", "newGroup", + authString1); + Map stat = client.stat(mrc1Address, noACVolumeName + "/chownTestFile", + false, false, false, authString3); + assertEquals("newUser", stat.get("ownerId")); + assertEquals("newGroup", stat.get("groupId")); + client.delete(mrc1Address, noACVolumeName + "/chownTestFile", authString3); + + // create a new directory: should succeed + client.createDir(mrc1Address, noACVolumeName + "/newDir", null, 0, authString1); + + // create a new file inside the dir: should succeed (in spite of + // having set an empty ACL on the parent directory) + client.createFile(mrc1Address, noACVolumeName + "/newDir/newFile", authString2); + + final String someone = NullAuthProvider.createAuthString("someone", MRCClient + .generateStringList("somegroup")); + + assertTrue(client.checkAccess(mrc1Address, noACVolumeName + "/newDir/newFile", "rwx", + someone)); + + // VOLUME ACLs + + // create a volume + client.createVolume(mrc1Address, volACVolumeName, RandomSelectionPolicy.POLICY_ID, null, + VolumeACLFileAccessPolicy.POLICY_ID, DefaultPartitioningPolicy.POLICY_ID, acl, + authString1); + + // create a new directory: should succeed for 'authString1', fail + // for 'authString2' + client.createDir(mrc1Address, volACVolumeName + "/newDir", null, 0, authString1); + + try { + client.createDir(mrc1Address, volACVolumeName + "/newDir2", authString2); + fail("access should have been denied"); + } catch (Exception exc) { + } + + // readdir: should succeed for both 'authString1' and 'authString2' + // and fail for 'authString3' + assertEquals(client.readDir(mrc1Address, volACVolumeName + "/newDir", authString1).size(), + 0); + assertEquals(client.readDir(mrc1Address, volACVolumeName + "/newDir", authString2).size(), + 0); + + try { + 
client.readDir(mrc1Address, volACVolumeName + "/newDir", authString3); + fail("access should have been denied"); + } catch (Exception exc) { + } + + // create a new file inside the dir: should succeed (in spite of + // having set an empty ACL on the parent directory) + client.createFile(mrc1Address, volACVolumeName + "/newDir/newFile", authString1); + + // POSIX ACLs + + // create a volume + client + .createVolume(mrc1Address, posixVolName, RandomSelectionPolicy.POLICY_ID, null, + POSIXFileAccessPolicy.POLICY_ID, DefaultPartitioningPolicy.POLICY_ID, null, + authString1); + + // create a new directory: should succeed for 'authString1', fail + // for 'authString2' + client.createDir(mrc1Address, posixVolName + "/newDir", authString1); + + assertTrue(client.checkAccess(mrc1Address, posixVolName + "/newDir", "rwx", authString1)); + + try { + client.createDir(mrc1Address, posixVolName + "/newDir2", authString2); + fail("access should have been denied"); + } catch (Exception exc) { + } + + // retrieve the ACL + Map statInfo = client.stat(mrc1Address, posixVolName + "/newDir", false, + false, true, authString1); + acl = (Map) statInfo.get("acl"); + assertEquals(3, acl.size()); + assertEquals(acl.get("user::"), 511L); + assertEquals(acl.get("group::"), 511L); + assertEquals(acl.get("other::"), 511L); + + // try to change an ACL entry: should fail for 'authString2', + // succeed for 'authString1' + Map newEntries = new HashMap(); + newEntries.put("group::", 5); + try { + client.setACLEntries(mrc1Address, posixVolName + "/newDir", newEntries, authString2); + fail("attempt to modify ACl as non-owner should have failed"); + } catch (Exception exc) { + } + + newEntries.clear(); + newEntries.put("group::", 2); + newEntries.put("mask::", 3); + client.setACLEntries(mrc1Address, posixVolName + "/newDir", newEntries, authString1); + + statInfo = client.stat(mrc1Address, posixVolName + "/newDir", false, false, true, + authString1); + acl = (Map) statInfo.get("acl"); + 
assertEquals(acl.size(), 4); + assertEquals(acl.get("user::"), 511L); + assertEquals(acl.get("group::"), 2L); + assertEquals(acl.get("other::"), 511L); + assertEquals(acl.get("mask::"), 3L); + + // change the access mode + client.changeAccessMode(mrc1Address, posixVolName + "/newDir", 0, authString1); + statInfo = client.stat(mrc1Address, posixVolName + "/newDir", false, false, true, + authString1); + acl = (Map) statInfo.get("acl"); + assertEquals(acl.size(), 4); + assertEquals(acl.get("user::"), 0L); + assertEquals(acl.get("group::"), 2L); + assertEquals(acl.get("other::"), 0L); + assertEquals(acl.get("mask::"), 0L); + + // readdir on "/newDir": should fail for any user now + try { + client.readDir(mrc1Address, posixVolName + "/newDir", authString1); + fail("access should have been denied"); + } catch (Exception exc) { + } + + try { + client.readDir(mrc1Address, posixVolName + "/newDir", authString2); + fail("access should have been denied"); + } catch (Exception exc) { + } + + // add an entry (and mask) for 'authString2' to 'newDir' + newEntries.clear(); + newEntries.put("user:userAB", 511); + newEntries.put("mask::", 511); + client.setACLEntries(mrc1Address, posixVolName + "/newDir", newEntries, authString1); + + try { + client.readDir(mrc1Address, posixVolName + "/newDir", authString2); + fail("access should have been denied due to insufficient search permissions"); + } catch (Exception exc) { + } + + // add an entry (and mask) for 'authString2' to volume + client.setACLEntries(mrc1Address, posixVolName, newEntries, authString1); + + assertTrue(client.checkAccess(mrc1Address, posixVolName + "/newDir", "w", authString2)); + + assertEquals(client.readDir(mrc1Address, posixVolName + "/newDir", authString2).size(), 0); + + client.removeACLEntries(mrc1Address, posixVolName + "/newDir", new ArrayList( + newEntries.keySet()), authString1); + + try { + client.readDir(mrc1Address, posixVolName + "/newDir", authString2); + fail("access should have been denied due to 
insufficient search permissions"); + } catch (Exception exc) { + } + + client.changeAccessMode(mrc1Address, posixVolName, 0005, authString1); // others + // = + // rx + assertEquals(client.readDir(mrc1Address, posixVolName, authString3).size(), 1); + assertFalse(client.checkAccess(mrc1Address, posixVolName, "w", authString3)); + + // create a POSIX ACL new volume and test "chmod" + client.deleteVolume(mrc1Address, posixVolName, authString1); + client + .createVolume(mrc1Address, posixVolName, RandomSelectionPolicy.POLICY_ID, null, + POSIXFileAccessPolicy.POLICY_ID, DefaultPartitioningPolicy.POLICY_ID, null, + authString1); + + client + .createFile(mrc1Address, posixVolName + "/someFile.txt", null, null, 224, + authString1); + statInfo = client.stat(mrc1Address, posixVolName + "/someFile.txt", false, false, false, + authString1); + long accessMode = (Long) statInfo.get("posixAccessMode"); + assertEquals(224, accessMode); + client.changeAccessMode(mrc1Address, posixVolName + "/someFile.txt", accessMode & 192, + authString1); + statInfo = client.stat(mrc1Address, posixVolName + "/someFile.txt", false, false, false, + authString1); + accessMode = (Long) statInfo.get("posixAccessMode"); + assertEquals(192, accessMode); + + // make root directory accessible for anyone + client.changeAccessMode(mrc1Address, posixVolName, 511, authString1); + + // create a new directory w/ search access for anyone + client.createDir(mrc1Address, posixVolName + "/stickyDir", null, 511, authString1); + + // create and delete/rename a file w/ different user IDs: this should + // work + client.createFile(mrc1Address, posixVolName + "/stickyDir/newfile.txt", authString2); + client.delete(mrc1Address, posixVolName + "/stickyDir/newfile.txt", authString1); + client.createFile(mrc1Address, posixVolName + "/stickyDir/newfile.txt", authString2); + client.move(mrc1Address, posixVolName + "/stickyDir/newfile.txt", posixVolName + + "/stickyDir/newfile2.txt", authString1); + + // set sticky bit; now, 
only the owner should be allowed to + // delete/rename the + // nested file + client.createFile(mrc1Address, posixVolName + "/stickyDir/newfile.txt", authString2); + client.changeAccessMode(mrc1Address, posixVolName + "/stickyDir", 512 | 511, authString1); + try { + client.delete(mrc1Address, posixVolName + "/stickyDir/newfile.txt", authString1); + fail("access should have been denied due to insufficient delete permissions (sticky bit)"); + } catch (Exception exc) { + } + try { + client.move(mrc1Address, posixVolName + "/stickyDir/newfile.txt", posixVolName + + "/stickyDir/newfile2.txt", authString1); + fail("access should have been denied due to insufficient renaming permissions (sticky bit)"); + } catch (Exception exc) { + } + + client.move(mrc1Address, posixVolName + "/stickyDir/newfile.txt", posixVolName + + "/stickyDir/newfile2.txt", authString2); + client.delete(mrc1Address, posixVolName + "/stickyDir/newfile2.txt", authString2); + } + + public void testFileSizeUpdate() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient + .generateStringList("groupZ")); + final String volumeName = "testVolume"; + final String fileName = volumeName + "/testFile"; + + // create a new file in a new volume + client.createVolume(mrc1Address, volumeName, authString); + client.createFile(mrc1Address, fileName, authString); + + // check and update file sizes repeatedly + Map headers = client.open(mrc1Address, fileName, "r", authString); + Map statInfo = client.stat(mrc1Address, fileName, false, false, false, + authString); + assertEquals(0l, statInfo.get("size")); + + client.updateFileSize(mrc1Address, headers.get(HTTPHeaders.HDR_XCAPABILITY), "[27,0]", + authString); + statInfo = client.stat(mrc1Address, fileName, false, false, false, authString); + assertEquals(27l, statInfo.get("size")); + + client.updateFileSize(mrc1Address, headers.get(HTTPHeaders.HDR_XCAPABILITY), "[12,0]", + authString); + statInfo = client.stat(mrc1Address, 
fileName, false, false, false, authString); + assertEquals(27l, statInfo.get("size")); + + client.updateFileSize(mrc1Address, headers.get(HTTPHeaders.HDR_XCAPABILITY), "[34,0]", + authString); + statInfo = client.stat(mrc1Address, fileName, false, false, false, authString); + assertEquals(34l, statInfo.get("size")); + + client.updateFileSize(mrc1Address, headers.get(HTTPHeaders.HDR_XCAPABILITY), "[10,1]", + authString); + statInfo = client.stat(mrc1Address, fileName, false, false, false, authString); + assertEquals(10l, statInfo.get("size")); + + client.updateFileSize(mrc1Address, headers.get(HTTPHeaders.HDR_XCAPABILITY), "[34,1]", + authString); + statInfo = client.stat(mrc1Address, fileName, false, false, false, authString); + assertEquals(34l, statInfo.get("size")); + + client.updateFileSize(mrc1Address, headers.get(HTTPHeaders.HDR_XCAPABILITY), "[10,1]", + authString); + statInfo = client.stat(mrc1Address, fileName, false, false, false, authString); + assertEquals(34l, statInfo.get("size")); + + client.updateFileSize(mrc1Address, headers.get(HTTPHeaders.HDR_XCAPABILITY), "[0,2]", + authString); + statInfo = client.stat(mrc1Address, fileName, false, false, false, authString); + assertEquals(0l, statInfo.get("size")); + + client.updateFileSize(mrc1Address, headers.get(HTTPHeaders.HDR_XCAPABILITY), "[12,0]", + authString); + statInfo = client.stat(mrc1Address, fileName, false, false, false, authString); + assertEquals(0l, statInfo.get("size")); + + client.updateFileSize(mrc1Address, headers.get(HTTPHeaders.HDR_XCAPABILITY), "[32,4]", + authString); + statInfo = client.stat(mrc1Address, fileName, false, false, false, authString); + assertEquals(32l, statInfo.get("size")); + } + + public void testDefaultStripingPolicies() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient + .generateStringList("groupZ")); + final String volumeName = "testVolume"; + final String dirName = volumeName + "/dir"; + final String 
fileName1 = dirName + "/testFile"; + final String fileName2 = dirName + "/testFile2"; + final String fileName3 = dirName + "/testFile3"; + + Map sp1 = RPCClient.generateMap("width", 1L, "policy", "RAID0", + "stripe-size", 64L); + Map sp2 = RPCClient.generateMap("width", 1L, "policy", "RAID1", + "stripe-size", 256L); + Map sp3 = RPCClient.generateMap("width", 1L, "policy", "RAID0", + "stripe-size", 128L); + + // create a new file in a directory in a new volume + client.createVolume(mrc1Address, volumeName, 1, sp1, 1, 1, null, authString); + client.createDir(mrc1Address, dirName, authString); + client.createFile(mrc1Address, fileName1, authString); + client.createFile(mrc1Address, fileName2, authString); + client.createFile(mrc1Address, fileName3, null, sp3, 0, authString); + + String xLocHeader = client.open(mrc1Address, fileName1, "c", authString).get( + HTTPHeaders.HDR_XLOCATIONS); + List header = (List) JSONParser.parseJSON(new JSONString(xLocHeader)); + Map spol = (Map) ((List) ((List) header.get(0)).get(0)) + .get(0); + assertEquals(sp1, spol); + + client.setDefaultStripingPolicy(mrc1Address, dirName, sp2, authString); + + xLocHeader = client.open(mrc1Address, fileName2, "c", authString).get( + HTTPHeaders.HDR_XLOCATIONS); + header = (List) JSONParser.parseJSON(new JSONString(xLocHeader)); + spol = (Map) ((List) ((List) header.get(0)).get(0)).get(0); + assertEquals(sp2, spol); + + xLocHeader = client.open(mrc1Address, fileName3, "c", authString).get( + HTTPHeaders.HDR_XLOCATIONS); + header = (List) JSONParser.parseJSON(new JSONString(xLocHeader)); + spol = (Map) ((List) ((List) header.get(0)).get(0)).get(0); + assertEquals(sp3, spol); + + } + + private void assertTree(InetSocketAddress server, String authString, String... 
paths) + throws Exception { + + // check whether all paths exist exactly once + for (String path : paths) { + + try { + Map statInfo = client.stat(server, path, false, false, false, + authString); + + // continue if the path does not point to a directory + if (!statInfo.get("objType").equals(2L)) + continue; + + } catch (Exception exc) { + throw new Exception("path '" + path + "' does not exist"); + } + + // if the path points to a directory, check whether the number of + // subdirectories is correct + int size = client.readDir(server, path, authString).size(); + int count = 0; + for (String otherPath : paths) { + if (!otherPath.startsWith(path + "/")) + continue; + + if (otherPath.substring(path.length() + 1).indexOf('/') == -1) + count++; + } + + assertEquals(count, size); + } + } + + public static void main(String[] args) { + TestRunner.run(MRCTest.class); + } + + private static Map getDefaultStripingPolicy() { + Map map = new HashMap(); + map.put("policy", "RAID0"); + map.put("stripe-size", 1000L); + map.put("width", 1L); + return map; + } +} diff --git a/servers/test/org/xtreemfs/test/mrc/MSReplicationTest.java b/servers/test/org/xtreemfs/test/mrc/MSReplicationTest.java new file mode 100644 index 0000000000000000000000000000000000000000..f159543f4f15d2dfbd1dae4cf074f62d307192cf --- /dev/null +++ b/servers/test/org/xtreemfs/test/mrc/MSReplicationTest.java @@ -0,0 +1,447 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.mrc; + +import java.io.File; +import java.net.InetSocketAddress; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.mrc.MRCClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.dir.DIRConfig; +import org.xtreemfs.mrc.MRCConfig; +import org.xtreemfs.mrc.RequestController; +import org.xtreemfs.test.SetupUtils; + +/** + * + * @author bjko + */ +public class MSReplicationTest extends TestCase { + + private static final boolean DEBUG = true; + + private static final String TEST_DIR = "/tmp/xtreemfs-test"; + + private RequestController mrc1; + + private RequestController mrc2; + + private org.xtreemfs.dir.RequestController dir; + + private MRCClient client; + + private MRCConfig mrcCfg1; + + private MRCConfig mrcCfg2; + + private DIRConfig dsCfg; + + private InetSocketAddress mrc1Address; + + private InetSocketAddress mrc2Address; + + public MSReplicationTest(String testName) { + super(testName); + //Logging.start(SetupUtils.DEBUG_LEVEL); + Logging.start(Logging.LEVEL_DEBUG); + } + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + + dsCfg = SetupUtils.createDIRConfig(); + + mrcCfg1 = SetupUtils.createMRC1Config(); + mrc1Address = SetupUtils.getMRC1Addr(); + + mrcCfg2 = SetupUtils.createMRC2Config(); + mrc2Address = SetupUtils.getMRC2Addr(); + + // cleanup + File testDir = new File(SetupUtils.TEST_DIR); + + FSUtils.delTree(testDir); + testDir.mkdirs(); + + // start services + dir = new org.xtreemfs.dir.RequestController(dsCfg); + dir.startup(); + + mrc1 = new RequestController(mrcCfg1); + mrc1.startup(); + mrc2 = new RequestController(mrcCfg2); + mrc2.startup(); + + client = SetupUtils.createMRCClient(10000); + } + + protected void tearDown() throws Exception { + // shut down all services + if (mrc1 != null) + mrc1.shutdown(); + if (mrc2 != null) + mrc2.shutdown(); + client.shutdown(); + dir.shutdown(); + + client.waitForShutdown(); + } + + public void testReplication() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient.generateStringList("groupG")); + final String volumeName = "testVolume"; + + client.createVolume(mrc1Address, volumeName, authString); + + client.createFile(mrc1Address, "testVolume/DUMMY", authString); + + RPCResponse> mrcr = client.sendRPC(mrc1Address, ".Rinfo", + null, null, null); + List info = mrcr.get(); + mrcr.freeBuffers(); + + String sliceID = null; + + for (Object tmp : info) { + Map slice = (Map) tmp; +// System.out.println("info: " + slice.get("volumeName")); + if (slice.get("volumeName").equals(volumeName)) + sliceID = (String) slice.get("sliceID"); + + } + assertNotNull(sliceID); + + List args = new LinkedList(); + args.add(sliceID); + List slaves = new LinkedList(); + slaves.add(mrc2Address.getHostName() + ":" + mrc2Address.getPort()); + args.add(slaves); + args.add(false); + mrcr = client.sendRPC(mrc1Address, ".RnewMasterSlice", args, null, null); + mrcr.waitForResponse(); + mrcr.freeBuffers(); + + // wait a sec because the master is setting up the slaves in async mode + Thread.sleep(1000); +// 
System.out.println("wait done"); + List entries = client.readDir(mrc2Address, "testVolume", + authString); +// for (String tmp : entries) +// System.out.println("entry: " + tmp); + + assertTrue(entries.contains("DUMMY")); + + } + + public void testSlaveRemoteReplay() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient.generateStringList("groupG")); + final String volumeName = "testVolume"; + + client.createVolume(mrc1Address, volumeName, authString); + + client.createFile(mrc1Address, "testVolume/DUMMY", authString); + + RPCResponse> mrcr = client.sendRPC(mrc1Address, ".Rinfo", + null, null, null); + List info = mrcr.get(); + mrcr.freeBuffers(); + + String sliceID = null; + + for (Object tmp : info) { + Map slice = (Map) tmp; +// System.out.println("info: " + slice.get("volumeName")); + if (slice.get("volumeName").equals(volumeName)) + sliceID = (String) slice.get("sliceID"); + + } + assertNotNull(sliceID); + + List args = new LinkedList(); + args.add(sliceID); + List slaves = new LinkedList(); + slaves.add(mrc2Address.getHostName() + ":" + mrc2Address.getPort()); + args.add(slaves); + args.add(false); + mrcr = client.sendRPC(mrc1Address, ".RnewMasterSlice", args, null, null); + mrcr.waitForResponse(); + mrcr.freeBuffers(); + + // wait a sec because the master is setting up the slaves in async mode + Thread.sleep(1000); + List entries = client.readDir(mrc2Address, "testVolume", + authString); +// for (String tmp : entries) +// System.out.println("entry: " + tmp); + + assertTrue(entries.contains("DUMMY")); + + // shut down slave + mrc2.shutdown(); + mrc2 = null; + + // create a new entry + client.createFile(mrc1Address, "testVolume/DUMMY_TWO", authString); + client.createFile(mrc1Address, "testVolume/DUMMY_THREE", authString); + client.createFile(mrc1Address, "testVolume/DUMMY_FOUR", authString); + + Thread.sleep(200); + + // start up the slave again + mrc2 = new RequestController(mrcCfg2); + mrc2.startup(); + + // 
check if the slave has recovered all entries it missied while offline + entries = client.readDir(mrc2Address, "testVolume", authString); +// for (String tmp : entries) +// System.out.println("entry: " + tmp); + + assertTrue(entries.contains("DUMMY_TWO")); + assertTrue(entries.contains("DUMMY_THREE")); + assertTrue(entries.contains("DUMMY_FOUR")); + + } + + public void testMasterOffline() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient.generateStringList("groupG")); + final String volumeName = "testVolume"; + + client.createVolume(mrc1Address, volumeName, authString); + + client.createFile(mrc1Address, "testVolume/DUMMY", authString); + + RPCResponse> mrcr = client.sendRPC(mrc1Address, ".Rinfo", + null, null, null); + List info = mrcr.get(); + mrcr.freeBuffers(); + + String sliceID = null; + + for (Object tmp : info) { + Map slice = (Map) tmp; +// System.out.println("info: " + slice.get("volumeName")); + if (slice.get("volumeName").equals(volumeName)) + sliceID = (String) slice.get("sliceID"); + + } + assertNotNull(sliceID); + + List args = new LinkedList(); + args.add(sliceID); + List slaves = new LinkedList(); + slaves.add(mrc2Address.getHostName() + ":" + mrc2Address.getPort()); + args.add(slaves); + args.add(false); + mrcr = client.sendRPC(mrc1Address, ".RnewMasterSlice", args, null, null); + mrcr.waitForResponse(); + mrcr.freeBuffers(); + + // wait a sec because the master is setting up the slaves in async mode + Thread.sleep(1000); + List entries = client.readDir(mrc2Address, "testVolume", + authString); +// for (String tmp : entries) +// System.out.println("entry: " + tmp); + + assertTrue(entries.contains("DUMMY")); + + // shut down slave + mrc2.shutdown(); + mrc2 = null; + + // shut down master + mrc1.shutdown(); + mrc1 = null; + + Thread.sleep(1000); + + // start up the slave again + mrc2 = new RequestController(mrcCfg2); + mrc2.startup(); + + // now the status of the slice should be OFFLINE + 
RPCResponse> resp = client.sendRPC(mrc2Address, ".Rinfo", + authString, null, null); + List slices = resp.get(); + resp.freeBuffers(); + for (Object tmp : slices) { + Map sl = (Map) tmp; + if (sl.get("sliceID").equals(sliceID)) { +// System.out.println("STATUS is " + sl.get("status")); + assertTrue(sl.get("status").equals("OFFLINE")); + break; + } + } + Thread.sleep(1000); +// System.out.println("yabba bla"); + + } + + public void testSlaveOnline() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient.generateStringList("groupG")); + final String volumeName = "testVolume"; + + client.createVolume(mrc1Address, volumeName, authString); + + client.createFile(mrc1Address, "testVolume/DUMMY", authString); + + RPCResponse> mrcr = client.sendRPC(mrc1Address, ".Rinfo", + null, null, null); + List info = mrcr.get(); + mrcr.freeBuffers(); + + String sliceID = null; + + for (Object tmp : info) { + Map slice = (Map) tmp; +// System.out.println("info: " + slice.get("volumeName")); + if (slice.get("volumeName").equals(volumeName)) + sliceID = (String) slice.get("sliceID"); + + } + assertNotNull(sliceID); + + List args = new LinkedList(); + args.add(sliceID); + List slaves = new LinkedList(); + slaves.add(mrc2Address.getHostName() + ":" + mrc2Address.getPort()); + args.add(slaves); + args.add(false); + mrcr = client.sendRPC(mrc1Address, ".RnewMasterSlice", args, null, null); + mrcr.waitForResponse(); + mrcr.freeBuffers(); + + // wait a sec because the master is setting up the slaves in async mode + Thread.sleep(1000); + List entries = client.readDir(mrc2Address, "testVolume", + authString); +// for (String tmp : entries) +// System.out.println("entry: " + tmp); + + assertTrue(entries.contains("DUMMY")); + + // create a new entry + client.createFile(mrc1Address, "testVolume/DUMMY_TWO", authString); + client.createFile(mrc1Address, "testVolume/DUMMY_THREE", authString); + client.createFile(mrc1Address, "testVolume/DUMMY_FOUR", 
authString); + + Thread.sleep(200); + + // check if the slave has recovered all entries it missied while offline + entries = client.readDir(mrc2Address, "testVolume", authString); +// for (String tmp : entries) +// System.out.println("entry: " + tmp); + + assertTrue(entries.contains("DUMMY_TWO")); + assertTrue(entries.contains("DUMMY_THREE")); + assertTrue(entries.contains("DUMMY_FOUR")); + + } + + public void testUnreplication() throws Exception { + + final String authString = NullAuthProvider.createAuthString("userXY", MRCClient.generateStringList("groupG")); + final String volumeName = "testVolume"; + + client.createVolume(mrc1Address, volumeName, authString); + + client.createFile(mrc1Address, "testVolume/DUMMY", authString); + + RPCResponse> mrcr = client.sendRPC(mrc1Address, ".Rinfo", + null, null, null); + List info = mrcr.get(); + mrcr.freeBuffers(); + + String sliceID = null; + + for (Object tmp : info) { + Map slice = (Map) tmp; +// System.out.println("info: " + slice.get("volumeName")); + if (slice.get("volumeName").equals(volumeName)) + sliceID = (String) slice.get("sliceID"); + + } + assertNotNull(sliceID); + + List args = new LinkedList(); + args.add(sliceID); + List slaves = new LinkedList(); + slaves.add(mrc2Address.getHostName() + ":" + mrc2Address.getPort()); + args.add(slaves); + args.add(false); + mrcr = client.sendRPC(mrc1Address, ".RnewMasterSlice", args, null, null); + mrcr.waitForResponse(); + mrcr.freeBuffers(); + + // wait a sec because the master is setting up the slaves in async mode + Thread.sleep(1000); +// System.out.println("wait done"); + List entries = client.readDir(mrc2Address, "testVolume", + authString); +// for (String tmp : entries) +// System.out.println("entry: " + tmp); + + assertTrue(entries.contains("DUMMY")); + + args = new LinkedList(); + args.add(sliceID); + mrcr = client.sendRPC(mrc1Address, ".RnoReplication", args, null, null); + mrcr.waitForResponse(); + mrcr.freeBuffers(); + mrcr = client.sendRPC(mrc2Address, 
".RnoReplication", args, null, null); + mrcr.waitForResponse(); + mrcr.freeBuffers(); + + Thread.sleep(200); + client.createFile(mrc1Address, "testVolume/DUMMY_TWO", authString); + Thread.sleep(200); + + entries = client.readDir(mrc2Address, "testVolume", authString); +// for (String tmp : entries) +// System.out.println("entry: " + tmp); + + assertFalse(entries.contains("DUMMY_TWO")); + + } + + public static void main(String[] args) { + TestRunner.run(MSReplicationTest.class); + } + +} diff --git a/servers/test/org/xtreemfs/test/mrc/SelectionPolicyTest.java b/servers/test/org/xtreemfs/test/mrc/SelectionPolicyTest.java new file mode 100644 index 0000000000000000000000000000000000000000..4798e0cf65fc973bb96dcd8b2e15a2f94c38a514 --- /dev/null +++ b/servers/test/org/xtreemfs/test/mrc/SelectionPolicyTest.java @@ -0,0 +1,132 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Nele Andersen (ZIB) + */ + +package org.xtreemfs.test.mrc; + +import java.net.InetAddress; +import java.net.URI; +import java.util.HashMap; +import java.util.Map; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.mrc.osdselection.ProximitySelectionPolicy; + +public class SelectionPolicyTest extends TestCase { + + private static final long MIN_FREE_CAPACITY = 32 * 1024 * 1024; + + private ProximitySelectionPolicy policy; + + private Map> osdMap; + + private InetAddress clientAddress; + + public void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." + getName()); + + policy = new ProximitySelectionPolicy(); + + clientAddress = InetAddress.getByName(URI.create("http://01.xtreemfs.com").getHost()); + + osdMap = new HashMap>(); + + Map attr1 = new HashMap(); + attr1.put("free", Long.toString(MIN_FREE_CAPACITY + 1)); + attr1.put("uri", "http://itu.dk"); + osdMap.put("1", attr1); + + Map attr2 = new HashMap(); + attr2.put("free", Long.toString(MIN_FREE_CAPACITY + 1)); + attr2.put("uri", "http://wiut.uz"); + osdMap.put("2", attr2); + + Map attr3 = new HashMap(); + attr3.put("free", Long.toString(MIN_FREE_CAPACITY + 1)); + attr3.put("uri", "http://pku.edu.cn"); + osdMap.put("3", attr3); + + Map attr4 = new HashMap(); + attr4.put("free", Long.toString(MIN_FREE_CAPACITY + 1)); + attr4.put("uri", "http://xtreemfs2.zib.de"); + osdMap.put("4", attr4); + + Map attr5 = new HashMap(); + attr5.put("free", Long.toString(MIN_FREE_CAPACITY + 1)); + attr5.put("uri", "http://xtreemfs2.zib.de"); + osdMap.put("5", attr5); + + } + + public void tearDown() throws Exception { + + } + + public void testInetAddressToInteger() throws Exception { + + byte[] bytes = { (byte) 130, (byte) 226, (byte) 142, 3 }; + assertEquals(130226142003L, policy.inetAddressToLong(bytes)); + + byte[] bytes2 = { (byte) 80, (byte) 80, (byte) 214, 93 }; + assertEquals(80080214093L, 
policy.inetAddressToLong(bytes2)); + } + + public void testGetOSDsForNewFile() { + + String[] osds = policy.getOSDsForNewFile(osdMap, clientAddress, 4, null); + assertEquals(osds.length, 4); + assertTrue(contains(osds, "1")); + assertTrue(contains(osds, "3")); + assertTrue(contains(osds, "4")); + assertTrue(contains(osds, "5")); + + osds = policy.getOSDsForNewFile(osdMap, clientAddress, 2, null); + assertEquals(osds.length, 2); + assertTrue(contains(osds, "4")); + assertTrue(contains(osds, "5")); + + osds = policy.getOSDsForNewFile(osdMap, clientAddress, 1, null); + assertEquals(osds.length, 1); + assertTrue(contains(osds, "4") || contains(osds, "5")); + + osds = policy.getOSDsForNewFile(osdMap, clientAddress, 6, null); + assertEquals(osds.length, 6); + assertEquals(osds[5], null); + } + + private boolean contains(String[] array, String s) { + for (int i = 0; i < array.length; i++) { + if (array[i].equals(s)) + return true; + } + return false; + } + + public static void main(String[] args) { + TestRunner.run(SelectionPolicyTest.class); + } + +} diff --git a/servers/test/org/xtreemfs/test/mrc/SliceManagerTest.java b/servers/test/org/xtreemfs/test/mrc/SliceManagerTest.java new file mode 100644 index 0000000000000000000000000000000000000000..a28a55e09ea1d63165ca2602a25a41f6e71f0f93 --- /dev/null +++ b/servers/test/org/xtreemfs/test/mrc/SliceManagerTest.java @@ -0,0 +1,115 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.mrc; + +import java.util.HashMap; +import java.util.Map; + +import junit.framework.TestCase; +import junit.textui.TestRunner; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.mrc.slices.SliceManager; +import org.xtreemfs.test.SetupUtils; + +public class SliceManagerTest extends TestCase { + + public static final String DB_DIRECTORY = "/tmp/database"; + + private SliceManager mngr; + + public SliceManagerTest() { + super(); + Logging.start(SetupUtils.DEBUG_LEVEL); + } + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + + // File dbDir = new File(DB_DIRECTORY); + // FSTools.delTree(dbDir); + // dbDir.mkdir(); + // + // mngr = new SliceManager(DB_DIRECTORY); + // mngr.startup(); + } + + protected void tearDown() throws Exception { + + // TODO + + // mngr.shutdown(); + } + + public void testAll() throws Exception { + + // TODO + + // final String volumeId = "12"; + // final String ownerId = "me"; + // final String groupId = "myGroup"; + // + // mngr.createVolume(volumeId, "myVolume", ownerId, groupId, + // getDefaultStripingPolicy(), 1, 1, false); + // assertEquals(mngr.getVolumes().size(), 1); + // + // VolumeInfo info = mngr.getVolumeByName("myVolume"); + // assertEquals(info.getId(), volumeId); + // + // // test backup + // mngr.backupDB(); + // assertTrue(mngr.hasDBBackup()); + // mngr.restoreDBBackup(); + // assertFalse(mngr.hasDBBackup()); + // assertNotNull(mngr.getVolumeById(volumeId)); + // + // // test sync and relink + // long fileId = mngr.getStorageManager(volumeId).createFile("bla.txt", + // null, 1, "me", "myGroup", null, false, null); + // mngr.syncSliceDB(); + // mngr.getStorageManager(volumeId).deleteFile(fileId); + // assertNull(mngr.getStorageManager(volumeId).getFileEntity(fileId)); + // + // mngr.relinkVolume(volumeId); + // assertEquals(mngr.getStorageManager(volumeId).getFileId("bla.txt"), + // fileId); + // assertNotNull(mngr.getVolumeById(volumeId)); + // + // mngr.reset(); + // assertEquals(mngr.getVolumes().size(), 0); + } + + public static void main(String[] args) { + TestRunner.run(SliceManagerTest.class); + } + + private static Map getDefaultStripingPolicy() { + Map map = new HashMap(); + map.put("policy", "RAID0"); + map.put("stripe-size", 1000L); + map.put("width", 1L); + return map; + } +} diff --git a/servers/test/org/xtreemfs/test/mrc/StorageManagerTest.java b/servers/test/org/xtreemfs/test/mrc/StorageManagerTest.java new file mode 100644 index 0000000000000000000000000000000000000000..4ef469c3b309f798b0dea39dcb6c971576a8aeb2 --- 
/dev/null +++ b/servers/test/org/xtreemfs/test/mrc/StorageManagerTest.java @@ -0,0 +1,297 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.mrc; + +import java.io.File; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.TimeSync; +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.dir.DIRConfig; +import org.xtreemfs.dir.RequestController; +import org.xtreemfs.mrc.brain.storage.SliceID; +import org.xtreemfs.mrc.brain.storage.StorageManager; +import org.xtreemfs.mrc.brain.storage.entities.ACLEntry; +import org.xtreemfs.mrc.brain.storage.entities.AbstractFileEntity; +import org.xtreemfs.mrc.brain.storage.entities.FileEntity; +import org.xtreemfs.test.SetupUtils; + +public class StorageManagerTest extends TestCase { + + public static final String DB_DIRECTORY = "/tmp/xtreemfs-test"; + + private StorageManager mngr; + + private RequestController dir; + + private DIRClient dirClient; + + public StorageManagerTest() { + super(); + Logging.start(SetupUtils.DEBUG_LEVEL); + } + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." + getName()); + + // initialize Directory Service (for synchronized clocks...) 
+ DIRConfig config = SetupUtils.createDIRConfig(); + dir = new RequestController(config); + dir.startup(); + dirClient = SetupUtils.createDIRClient(60000); + TimeSync.initialize(dirClient, 60000, 50, NullAuthProvider.createAuthString("bla", "bla")); + + // reset database + File dbDir = new File(DB_DIRECTORY); + FSUtils.delTree(dbDir); + dbDir.mkdirs(); + mngr = new StorageManager(DB_DIRECTORY, new SliceID(1)); + mngr.startup(); + } + + protected void tearDown() throws Exception { + mngr.cleanup(); + dirClient.shutdown(); + dir.shutdown(); + + dirClient.waitForShutdown(); + } + + public void testCreateDelete() throws Exception { + + final String userId = "me"; + final String groupId = "myGroup"; + final Map stripingPolicy = getDefaultStripingPolicy(); + + long rootDirId = mngr.createFile(null, userId, groupId, stripingPolicy, true, null); + mngr.linkFile("rootDir", rootDirId, 1); + assertFalse(rootDirId == -1); + + long subDirId = mngr.createFile(null, userId, groupId, stripingPolicy, true, null); + mngr.linkFile("subDir", subDirId, rootDirId); + assertFalse(subDirId == -1); + + long fileId = mngr.createFile(null, userId, groupId, stripingPolicy, false, null); + mngr.linkFile("file.txt", fileId, subDirId); + assertFalse(fileId == -1); + assertFalse(mngr.hasChildren(fileId)); + + assertEquals(mngr.getFileEntity("rootDir").getId(), rootDirId); + assertEquals(mngr.getFileEntity("rootDir/subDir").getId(), subDirId); + assertEquals(mngr.getFileEntity("rootDir/subDir/file.txt").getId(), fileId); + + assertTrue(mngr.getFileEntity(subDirId).isDirectory()); + assertTrue(mngr.hasChildren(subDirId)); + assertEquals(mngr.getChildren(subDirId).size(), 1); + assertEquals(mngr.getChildren(subDirId).get(0), "file.txt"); + assertEquals(mngr.getChildren(fileId).size(), 0); + + assertTrue(mngr.fileExists(rootDirId, "subDir")); + + Map fileData = mngr.getChildData(subDirId); + assertEquals(fileData.size(), 1); + mngr.unlinkFile("file.txt", fileId, subDirId); + + AbstractFileEntity file 
= fileData.values().iterator().next(); + file.setLinkCount(0); + file.setId(0); + fileId = mngr.createFile(file, null); + mngr.linkFile("newFile.txt", fileId, rootDirId); + assertEquals(1, mngr.getFileEntity(fileId).getLinkCount()); + mngr.linkFile("newFile.txt", fileId, subDirId); + assertEquals(2, mngr.getFileEntity(fileId).getLinkCount()); + assertEquals(mngr.getFileEntity("rootDir/subDir/newFile.txt").getId(), fileId); + assertEquals(mngr.getFileEntity("rootDir/newFile.txt").getId(), fileId); + + List children = mngr.getChildren(rootDirId); + assertEquals(children.size(), 2); + assertTrue(children.contains("newFile.txt") && children.contains("subDir")); + assertEquals(mngr.getChildren(subDirId).size(), 1); + + assertEquals(mngr.getStripingPolicy(rootDirId).getPolicy(), "RAID0"); + assertEquals(mngr.getStripingPolicy(rootDirId).getWidth(), 1L); + assertEquals(mngr.getStripingPolicy(rootDirId).getStripeSize(), 1000L); + + mngr.unlinkFile("newFile.txt", fileId, rootDirId); + assertNotNull(mngr.getFileEntity(fileId)); + assertEquals(1, mngr.getFileEntity(fileId).getLinkCount()); + mngr.unlinkFile("newFile.txt", fileId, subDirId); + assertNull(mngr.getFileEntity(fileId)); + + mngr.unlinkFile("subDir", subDirId, rootDirId); + assertEquals(mngr.getChildren(rootDirId).size(), 0); + mngr.unlinkFile("rootDir", rootDirId, 1); + assertNull(mngr.getFileEntity(rootDirId)); + + long id1 = mngr.createFile(null, userId, groupId, stripingPolicy, false, null); + long id2 = mngr.createFile(null, userId, groupId, stripingPolicy, false, null); + long id3 = mngr.createFile(null, userId, groupId, stripingPolicy, false, null); + mngr.linkFile("bla1.txt", id1, 1); + mngr.linkFile("bla2.txt", id2, 1); + mngr.linkFile("bla3.txt", id3, 1); + + children = mngr.getChildren(1); + assertTrue(children.contains("bla1.txt")); + assertTrue(children.contains("bla2.txt")); + assertTrue(children.contains("bla3.txt")); + assertEquals(children.size(), 3); + + mngr.unlinkFile("bla1.txt", id1, 1); + 
mngr.unlinkFile("bla2.txt", id2, 1); + mngr.unlinkFile("bla3.txt", id3, 1); + + assertFalse(mngr.fileExists(subDirId, "file.txt")); + } + + public void testSymlink() throws Exception { + + final String userId = "me"; + final String groupId = "myGroup"; + final Map stripingPolicy = getDefaultStripingPolicy(); + + long fileId = mngr.createFile("blub/bla.txt", userId, groupId, stripingPolicy, false, null); + mngr.linkFile("test.txt", fileId, 1); + assertEquals(mngr.getFileReference(fileId), "blub/bla.txt"); + } + + public void testAttributes() throws Exception { + + final String userId = "me"; + final String groupId = "myGroup"; + final Map stripingPolicy = getDefaultStripingPolicy(); + + Map attrs = new HashMap(); + attrs.put("myKey", "myValue"); + attrs.put("blaKey", "blaValue"); + + long fileId = mngr.createFile(null, userId, groupId, stripingPolicy, false, null); + mngr.linkFile("test.txt", fileId, 1); + mngr.addXAttributes(fileId, attrs); + + attrs = mngr.getXAttributes(fileId); + assertEquals(attrs.size(), 2); + + List list = new ArrayList(); + list.add("myKey"); + mngr.deleteXAttributes(fileId, list); + assertEquals(mngr.getXAttributes(fileId).size(), 1); + assertEquals(mngr.getXAttributes(fileId).get("blaKey"), "blaValue"); + assertNull(mngr.getXAttributes(fileId).get("myKey")); + + mngr.addXAttributes(fileId, attrs); + assertEquals(mngr.getXAttributes(fileId).size(), 2); + mngr.deleteXAttributes(fileId, null); + assertEquals(mngr.getXAttributes(fileId).size(), 0); + } + + public void testPosixAttributes() throws Exception { + + final String userId = "me"; + final String groupId = "myGroup"; + final Map stripingPolicy = getDefaultStripingPolicy(); + + long fileId = mngr.createFile(null, userId, groupId, stripingPolicy, false, null); + mngr.linkFile("test.txt", fileId, 1); + + mngr.setFileSize(fileId, 121, 0, 0); + assertEquals(mngr.getFileEntity("test.txt").getId(), fileId); + assertEquals(((FileEntity) mngr.getFileEntity("test.txt")).getSize(), 121); + } + + 
public void testACLs() throws Exception { + + final String userId = "me"; + final String groupId = "myGroup"; + final Map stripingPolicy = getDefaultStripingPolicy(); + + Map acl = new HashMap(); + acl.put("1", 3L); + acl.put("2", 7L); + acl.put("3", 1L); + + long fileId = mngr.createFile(null, userId, groupId, stripingPolicy, false, acl); + mngr.linkFile("test.txt", fileId, 1); + + ACLEntry[] aclArray = mngr.getFileEntity(fileId).getAcl(); + assertEquals(aclArray.length, 3); + for (ACLEntry entry : aclArray) + assertTrue((entry.getEntity().equals("1") && entry.getRights() == 3) + || (entry.getEntity().equals("2") && entry.getRights() == 7) + || (entry.getEntity().equals("3") && entry.getRights() == 1)); + + acl.clear(); + acl.put("4", 4L); + mngr.setFileACL(fileId, acl); + aclArray = mngr.getFileEntity(fileId).getAcl(); + assertEquals(aclArray.length, 1); + assertEquals(aclArray[0].getEntity(), "4"); + assertEquals(aclArray[0].getRights(), 4L); + } + + // public void testMisc() throws Exception { + // + // long fileId = mngr.createFile(null, "me", "myGroup", + // getDefaultStripingPolicy(), false, null); + // mngr.linkFile("newFile", fileId, 1); + // AbstractFileEntity file = mngr.getFileEntity(fileId); + // + // AbstractFileEntity copy = Converter + // .mapToFile((Map) Converter.fileTreeToList(mngr, + // file).get(0)); + // + // assertEquals(file.getAtime(), copy.getAtime()); + // assertEquals(file.getCtime(), copy.getCtime()); + // assertEquals(file.getMtime(), copy.getMtime()); + // assertEquals(file.getAcl(), copy.getAcl()); + // assertEquals(file.getUserId(), copy.getUserId()); + // assertEquals(file.getGroupId(), copy.getGroupId()); + // assertEquals(((FileEntity) file).getSize(), ((FileEntity) copy) + // .getSize()); + // assertEquals(((FileEntity) file).getXLocationsList(), + // ((FileEntity) copy).getXLocationsList()); + // } + + public static void main(String[] args) { + TestRunner.run(StorageManagerTest.class); + } + + private static Map 
getDefaultStripingPolicy() { + Map map = new HashMap(); + map.put("policy", "RAID0"); + map.put("stripe-size", 1000L); + map.put("width", 1L); + return map; + } +} diff --git a/servers/test/org/xtreemfs/test/osd/CleanUpTest.java b/servers/test/org/xtreemfs/test/osd/CleanUpTest.java new file mode 100644 index 0000000000000000000000000000000000000000..9143bea85ce5f8700bde11df63848f5f99a87dc0 --- /dev/null +++ b/servers/test/org/xtreemfs/test/osd/CleanUpTest.java @@ -0,0 +1,324 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion and + Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHOR: Felix Langner (ZIB) + */ + +package org.xtreemfs.test.osd; + + +import java.io.File; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import junit.framework.TestCase; + +import org.junit.After; +import org.junit.Before; +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.io.RandomAccessFile; +import org.xtreemfs.common.clients.mrc.MRCClient; +import org.xtreemfs.common.clients.osd.ConcurrentFileMap; +import org.xtreemfs.common.clients.osd.OSDClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.dir.RequestController; +import org.xtreemfs.osd.OSD; +import org.xtreemfs.osd.storage.HashStorageLayout; +import org.xtreemfs.osd.storage.MetadataCache; +import org.xtreemfs.test.SetupUtils; +import org.xtreemfs.utils.cleanup_osd; + +/** + * + * @author langner + * + */ + +public class CleanUpTest extends TestCase{ + + private static String testVolume = "testVolume"; + + private RequestController dir; + private org.xtreemfs.mrc.RequestController mrc; + private MRCClient mrcClient; + private OSD osd; + private String authString; + private HashStorageLayout layout; + private ReusableBuffer data = new ReusableBuffer(ByteBuffer.wrap(((String) "zombie").getBytes())); + private OSDClient client; + private String volumeID; + private Set zombieNames; + + public CleanUpTest() { + Logging.start(Logging.LEVEL_WARN); + } + + @Before + public void setUp() throws Exception { + // cleanup + File testDir = new File(SetupUtils.TEST_DIR); + + FSUtils.delTree(testDir); + testDir.mkdirs(); + + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ + getName()); + + // startup: DIR + dir = new RequestController(SetupUtils.createDIRConfig()); + dir.startup(); + + // startup: OSD + osd = new OSD(SetupUtils.createOSD1Config()); + + // startup: MRC + mrc = new org.xtreemfs.mrc.RequestController(SetupUtils.createMRC1Config()); + mrc.startup(); + + authString = NullAuthProvider.createAuthString("", ""); + zombieNames = new HashSet(); + zombieNames.add("666"); zombieNames.add("667"); + + layout = new HashStorageLayout(SetupUtils.createOSD1Config(),new MetadataCache()); + + client = SetupUtils.createOSDClient(OSDClient.DEFAULT_TIMEOUT); + + mrcClient = SetupUtils.createMRCClient(MRCClient.DEFAULT_TIMEOUT); + } + + @After + public void tearDown() throws Exception { + mrcClient.shutdown(); + client.shutdown(); + mrc.shutdown(); + osd.shutdown(); + dir.shutdown(); + client.waitForShutdown(); + mrcClient.waitForShutdown(); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, BufferPool.getStatus()); + } + + /** + * Test the Cleanup function without any files on the OSD. + * @throws Exception + */ + public void testCleanUpEmpty() throws Exception{ + Map>> result = null; + + // start the cleanUp Operation + result = client.cleanUp(SetupUtils.getOSD1Addr(),authString).get(); + + assertNull(result); + } + + /** + * Test the Cleanup function with files without zombies on the OSD. + * @throws Exception + */ + public void testCleanUpFilesWithoutZombies() throws Exception{ + Map>> result = null; + + insertSomeTestData(); + // start the cleanUp Operation + result = client.cleanUp(SetupUtils.getOSD1Addr(),authString).get(); + + assertNull(result); + } + + /** + * Test the Cleanup function with files and with zombies on the OSD. 
+ * @throws Exception + */ + public void testCleanUpFilesWithZombies() throws Exception{ + Map>> response = null; + ConcurrentFileMap result = null; + + insertSomeTestData(); + + //insert zombies + String[] zombieArray = zombieNames.toArray(new String[2]); + layout.writeObject(volumeID+":"+zombieArray[0], 1, data, 1, 0, "chksum1", null, 1); + layout.writeObject(volumeID+":"+zombieArray[1], 2, data, 1, 0, "chksum2", null, 1); + + // start the cleanUp Operation + response = client.cleanUp(SetupUtils.getOSD1Addr(),authString).get(); + + assertNotNull(response); + + result = new ConcurrentFileMap(response); + + assertTrue(result.getFileNumberSet(volumeID).equals(zombieNames)); + + //restore the zombie files + for (List volume : result.keySetList()){ + for (String file : result.getFileIDSet(volume)){ + Long fileNumber = Long.valueOf(file.substring(file.indexOf(":")+1, file.length())); + + mrcClient.restoreFile( + new InetSocketAddress(volume.get(1), + Integer.parseInt(volume.get(2))), + "lost+found", fileNumber, + result.getFileSize(volume, file),null, + authString,SetupUtils.getOSD1UUID().toString(),result.getObjectSize(volume,file),volume.get(0)); + } + } + + //check the osd once again + response = client.cleanUp(SetupUtils.getOSD1Addr(),authString).get(); + + assertNull(response); + } + + /** + * Test the Cleanup function with a volume registered at the DIR but not on the MRC. + * @throws Exception + */ + public void testCleanUpLostVolume() throws Exception{ + // can not be tested right now + assertTrue(true); + } + + /** + * Test the Cleanup function with files and with zombies(files of an unknown volume) on the OSD. 
+ * @throws Exception + */ + public void testCleanUpUnkownVolume() throws Exception{ + Map>> response = null; + ConcurrentFileMap result = null; + + insertSomeTestData(); + + //insert zombies + String[] zombieArray = zombieNames.toArray(new String[2]); + layout.writeObject("002302340"+":"+zombieArray[0], 1, data, 1, 0, "chksum3", null, 1); + layout.writeObject("002302340"+":"+zombieArray[1], 2, data, 1, 0, "chksum4", null, 1); + + // start the cleanUp Operation + response = client.cleanUp(SetupUtils.getOSD1Addr(),authString).get(); + + assertNotNull(response); + + result = new ConcurrentFileMap(response); + + assertTrue(result.getFileNumberSet("unknown").equals(zombieNames)); + + //Delete the zombie files + client.cleanUpDelete(SetupUtils.getOSD1Addr(), authString, "002302340"+":"+zombieArray[0]).waitForResponse(0); + client.cleanUpDelete(SetupUtils.getOSD1Addr(), authString, "002302340"+":"+zombieArray[1]).waitForResponse(0); + + //check the osd once again + response = client.cleanUp(SetupUtils.getOSD1Addr(),authString).get(); + + System.out.println(response); + assertNull(response); + } + + /** + * Test the Cleanup function UI with zombies. 
+ * @throws Exception + + public void testCleanUpUI() throws Exception{ + insertSomeTestData(); + + //insert zombies + String[] zombieArray = zombieNames.toArray(new String[2]); + layout.writeObject("002302340"+":"+zombieArray[0], 1, data, 1, 0, "chksum5", null, 1); + layout.writeObject("002302340"+":"+zombieArray[1], 2, data, 1, 0, "chksum6", null, 1); + layout.writeObject(volumeID+":"+zombieArray[0]+"1", 1, data, 1, 0, "chksum1", null, 1); + layout.writeObject(volumeID+":"+zombieArray[1]+"1", 2, data, 1, 0, "chksum2", null, 1); + + String[] args = new String[3]; + args[0] = "-d"; + args[1] = "http://"+SetupUtils.getDIRAddr().getHostName()+":"+SetupUtils.getDIRAddr().getPort()+"/"; + args[2] = "http://"+SetupUtils.getOSD1Addr().getHostName()+":"+SetupUtils.getOSD1Addr().getPort()+"/"; + cleanup_osd.main(args); + + assertTrue(true); + } + */ + + /** + * Test the Cleanup function UI with zombies and UUID support. + * @throws Exception + + public void testCleanUpUIUUID() throws Exception{ + insertSomeTestData(); + + //insert zombies + String[] zombieArray = zombieNames.toArray(new String[2]); + layout.writeObject("002302340"+":"+zombieArray[0], 1, data, 1, 0, "chksum5", null, 1); + layout.writeObject("002302340"+":"+zombieArray[1], 2, data, 1, 0, "chksum6", null, 1); + layout.writeObject(volumeID+":"+zombieArray[0]+"1", 1, data, 1, 0, "chksum1", null, 1); + layout.writeObject(volumeID+":"+zombieArray[1]+"1", 2, data, 1, 0, "chksum2", null, 1); + + String[] args = new String[1]; + args[0] = "uuid:"+SetupUtils.getOSD1UUID(); + cleanup_osd.main(args); + + assertTrue(true); + } + */ +/* + * private functions + */ + + private void insertSomeTestData() throws Exception{ + MRCClient mrcClient = SetupUtils.createMRCClient(10000); + mrcClient.createVolume(SetupUtils.getMRC1Addr(), testVolume, authString); + mrcClient.createDir(SetupUtils.getMRC1Addr(), testVolume+"/test", authString); + mrcClient.createDir(SetupUtils.getMRC1Addr(), testVolume+"/emptyDir", authString); + 
mrcClient.createDir(SetupUtils.getMRC1Addr(), testVolume+"/anotherDir", authString); + mrcClient.createFile(SetupUtils.getMRC1Addr(), testVolume+"/test/test1", authString); + mrcClient.createFile(SetupUtils.getMRC1Addr(), testVolume+"/test/test2", authString); + mrcClient.createFile(SetupUtils.getMRC1Addr(), testVolume+"/anotherDir/test3", authString); + + RandomAccessFile test1 = new RandomAccessFile("r",SetupUtils.getMRC1Addr(),testVolume+"/test/test1",mrcClient.getSpeedy(),authString); + RandomAccessFile test3 = new RandomAccessFile("r",SetupUtils.getMRC1Addr(),testVolume+"/anotherDir/test3",mrcClient.getSpeedy(),authString); + + String fileVolume = mrcClient.stat(SetupUtils.getMRC1Addr(), testVolume+"/test/test2", false, true, false, authString).get("fileId").toString(); + volumeID = fileVolume.substring(0, fileVolume.indexOf(':')); + + + String content = ""; + for (int i = 0; i < 6000; i++) + content = content.concat("Hello World "); + byte[] bytesIn = content.getBytes(); + assertEquals(bytesIn.length, 72000); + + int length = bytesIn.length; + + test1.write(bytesIn, 0, length); + test3.write(bytesIn, 0, 65536); + + mrcClient.shutdown(); + mrcClient.waitForShutdown(); + } +} diff --git a/servers/test/org/xtreemfs/test/osd/ClientLeaseTest.java b/servers/test/org/xtreemfs/test/osd/ClientLeaseTest.java new file mode 100644 index 0000000000000000000000000000000000000000..87376b679c3a0484f0956096dcd260ca1b2132c7 --- /dev/null +++ b/servers/test/org/xtreemfs/test/osd/ClientLeaseTest.java @@ -0,0 +1,335 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion and + Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Björn Kolbeck (ZIB) + */ + +package org.xtreemfs.test.osd; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.ClientLease; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.HttpErrorException; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.clients.osd.OSDClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.common.striping.RAID0; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.dir.DIRConfig; +import org.xtreemfs.dir.RequestController; +import org.xtreemfs.osd.OSD; +import org.xtreemfs.osd.OSDConfig; +import org.xtreemfs.test.SetupUtils; + +/** + * Class for testing the NewOSD It uses the old OSDTest tests. 
It checks if the + * OSD works without replicas neither striping + * + * @author Jesus Malo (jmalo) + */ +public class ClientLeaseTest extends TestCase { + + private final ServiceUUID serverID; + + private final Locations loc; + + private final String file; + + private Capability cap; + + private OSD osd; + + private final long stripeSize = 1; + + private DIRClient dirClient; + + private OSDClient client; + + private final DIRConfig dirConfig; + + private final OSDConfig osdConfig; + + private RequestController dir; + + public ClientLeaseTest(String testName) throws Exception { + super(testName); + + Logging.start(Logging.LEVEL_TRACE); + + dirConfig = SetupUtils.createDIRConfig(); + osdConfig = SetupUtils.createOSD1Config(); + + // It sets the loc attribute + List locations = new ArrayList(1); + StripingPolicy sp = new RAID0(stripeSize, 1); + serverID = SetupUtils.getOSD1UUID(); + List osd = new ArrayList(1); + osd.add(serverID); + locations.add(new Location(sp, osd)); + loc = new Locations(locations); + + file = "1:1"; + cap = new Capability(file, "x", 0, osdConfig.getCapabilitySecret()); + } + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + + // cleanup + File testDir = new File(SetupUtils.TEST_DIR); + + FSUtils.delTree(testDir); + testDir.mkdirs(); + + dir = new RequestController(dirConfig); + dir.startup(); + + SetupUtils.initTimeSync(); + + dirClient = SetupUtils.createDIRClient(10000); + + osd = new OSD(osdConfig); + client = SetupUtils.createOSDClient(10000); + } + + protected void tearDown() throws Exception { + osd.shutdown(); + dir.shutdown(); + + client.shutdown(); + client.waitForShutdown(); + + if (dirClient != null) { + dirClient.shutdown(); + dirClient.waitForShutdown(); + } + } + + public void testAcquireLease() throws Exception { + + OSDClient c = new OSDClient(dirClient.getSpeedy()); + + ClientLease lease = new ClientLease(file); + lease.setClientId("ABCDEF"); + lease.setOperation(ClientLease.EXCLUSIVE_LEASE); + lease.setFirstObject(0); + lease.setLastObject(ClientLease.TO_EOF); + + RPCResponse>> r = c.acquireClientLease(serverID.getAddress(), loc, cap, lease); + List> tmp = r.get(); + + ClientLease result = ClientLease.parseFromMap(tmp.get(0)); + assertNotNull(result.getClientId()); + assertTrue(result.getSequenceNo() > 0); + assertTrue(result.getExpires() > 0); + } + + public void testConflictingLeases() throws Exception { + + assertNotNull(dirClient); + OSDClient c = new OSDClient(dirClient.getSpeedy()); + + ClientLease lease = new ClientLease(file); + lease.setClientId("ABCDEF"); + lease.setOperation(ClientLease.EXCLUSIVE_LEASE); + lease.setFirstObject(0); + lease.setLastObject(ClientLease.TO_EOF); + + RPCResponse>> r = c.acquireClientLease(serverID.getAddress(), loc, cap, lease); + List> tmp = r.get(); + + ClientLease result = ClientLease.parseFromMap(tmp.get(0)); + assertNotNull(result.getClientId()); + assertTrue(result.getSequenceNo() > 0); + assertTrue(result.getExpires() > 0); + + + lease.setClientId("YXYXYX"); + r = c.acquireClientLease(serverID.getAddress(), loc, cap, lease); + tmp = r.get(); + + result = ClientLease.parseFromMap(tmp.get(0)); + 
assertNull(result.getClientId()); + + } + + public void testMultipleLeases() throws Exception { + + assertNotNull(dirClient); + OSDClient c = new OSDClient(dirClient.getSpeedy()); + + ClientLease lease = new ClientLease(file); + lease.setClientId("ABCDEF"); + lease.setOperation(ClientLease.EXCLUSIVE_LEASE); + lease.setFirstObject(10); + lease.setLastObject(ClientLease.TO_EOF); + + RPCResponse>> r = c.acquireClientLease(serverID.getAddress(), loc, cap, lease); + List> tmp = r.get(); + + ClientLease result = ClientLease.parseFromMap(tmp.get(0)); + assertNotNull(result.getClientId()); + assertTrue(result.getSequenceNo() > 0); + assertTrue(result.getExpires() > 0); + + + ClientLease lease2 = new ClientLease(file); + lease2.setClientId("ABCDEF"); + lease2.setOperation(ClientLease.EXCLUSIVE_LEASE); + lease2.setFirstObject(0); + lease2.setLastObject(9); + + RPCResponse>> r2 = c.acquireClientLease(serverID.getAddress(), loc, cap, lease2); + tmp = r2.get(); + + result = ClientLease.parseFromMap(tmp.get(0)); + assertNotNull(result.getClientId()); + assertTrue(result.getSequenceNo() > 0); + assertTrue(result.getExpires() > 0); + + } + + public void testReturnLease() throws Exception { + + assertNotNull(dirClient); + OSDClient c = new OSDClient(dirClient.getSpeedy()); + + ClientLease lease = new ClientLease(file); + lease.setClientId("ABCDEF"); + lease.setOperation(ClientLease.EXCLUSIVE_LEASE); + lease.setFirstObject(10); + lease.setLastObject(ClientLease.TO_EOF); + + RPCResponse>> r = c.acquireClientLease(serverID.getAddress(), loc, cap, lease); + List> tmp = r.get(); + + ClientLease result = ClientLease.parseFromMap(tmp.get(0)); + assertNotNull(result.getClientId()); + assertTrue(result.getSequenceNo() > 0); + assertTrue(result.getExpires() > 0); + + //return the lease + try { + RPCResponse r2 = c.returnLease(serverID.getAddress(), loc, cap, result); + r2.waitForResponse(); + } catch (HttpErrorException ex) { + fail("cannot return lease: "+ex); + } + + //try to acquire lease 
again + r = c.acquireClientLease(serverID.getAddress(), loc, cap, lease); + tmp = r.get(); + + result = ClientLease.parseFromMap(tmp.get(0)); + assertNotNull(result.getClientId()); + assertTrue(result.getSequenceNo() > 0); + assertTrue(result.getExpires() > 0); + + + } + + public void testRenewLease() throws Exception { + + assertNotNull(dirClient); + OSDClient c = new OSDClient(dirClient.getSpeedy()); + + ClientLease lease = new ClientLease(file); + lease.setClientId("ABCDEF"); + lease.setOperation(ClientLease.EXCLUSIVE_LEASE); + lease.setFirstObject(0); + lease.setLastObject(ClientLease.TO_EOF); + + RPCResponse>> r = c.acquireClientLease(serverID.getAddress(), loc, cap, lease); + List> tmp = r.get(); + + ClientLease result = ClientLease.parseFromMap(tmp.get(0)); + assertNotNull(result.getClientId()); + assertTrue(result.getSequenceNo() > 0); + assertTrue(result.getExpires() > 0); + + + r = c.acquireClientLease(serverID.getAddress(), loc, cap, result); + tmp = r.get(); + + result = ClientLease.parseFromMap(tmp.get(0)); + assertNotNull(result.getClientId()); + assertTrue(result.getExpires() > lease.getExpires()); + + } + + public void testTimeout() throws Exception { + + assertNotNull(dirClient); + OSDClient c = new OSDClient(dirClient.getSpeedy()); + + ClientLease lease = new ClientLease(file); + lease.setClientId("ABCDEF"); + lease.setOperation(ClientLease.EXCLUSIVE_LEASE); + lease.setFirstObject(0); + lease.setLastObject(ClientLease.TO_EOF); + + RPCResponse>> r = c.acquireClientLease(serverID.getAddress(), loc, cap, lease); + List> tmp = r.get(); + + ClientLease result = ClientLease.parseFromMap(tmp.get(0)); + assertNotNull(result.getClientId()); + assertTrue(result.getSequenceNo() > 0); + assertTrue(result.getExpires() > 0); + + try { + RPCResponse r2 = c.put(serverID.getAddress(),loc,cap,file,0,ReusableBuffer.wrap("YaggaYagga".getBytes()),result); + r2.waitForResponse(); + } catch (HttpErrorException ex) { + fail(ex.toString()); + } + + result.setExpires(1); + 
+ try { + RPCResponse r2 = c.put(serverID.getAddress(),loc,cap,file,0,ReusableBuffer.wrap("YaggaYagga".getBytes()),result); + r2.waitForResponse(); + fail("lease should be timed out"); + } catch (HttpErrorException ex) { + } + + + } + + public static void main(String[] args) { + TestRunner.run(ClientLeaseTest.class); + } +} diff --git a/servers/test/org/xtreemfs/test/osd/CowPolicyTest.java b/servers/test/org/xtreemfs/test/osd/CowPolicyTest.java new file mode 100644 index 0000000000000000000000000000000000000000..7c2e7811f13cd1195583f20f574c1d82482212eb --- /dev/null +++ b/servers/test/org/xtreemfs/test/osd/CowPolicyTest.java @@ -0,0 +1,94 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. + */ + +package org.xtreemfs.test.osd; + +import junit.framework.TestCase; +import org.xtreemfs.osd.storage.CowPolicy; +import org.xtreemfs.osd.storage.CowPolicy.cowMode; + +/** + * + * @author bjko + */ +public class CowPolicyTest extends TestCase { + + public CowPolicyTest(String testName) { + super(testName); + } + + @Override + protected void setUp() throws Exception { + super.setUp(); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + } + + public void testNoCow() throws Exception { + CowPolicy p = new CowPolicy(cowMode.NO_COW); + assertFalse(p.isCOW(0)); + assertFalse(p.isCOW(10)); + assertFalse(p.isCOW(55)); + p.objectChanged(0); + assertFalse(p.isCOW(0)); + } + + public void testAlwaysCow() throws Exception { + CowPolicy p = new CowPolicy(cowMode.ALWAYS_COW); + assertTrue(p.isCOW(0)); + assertTrue(p.isCOW(10)); + assertTrue(p.isCOW(55)); + p.objectChanged(0); + assertTrue(p.isCOW(0)); + } + + public void testCowOnce() throws Exception { + CowPolicy p = new CowPolicy(cowMode.COW_ONCE,115); + assertTrue(p.isCOW(0)); + assertTrue(p.isCOW(10)); + assertTrue(p.isCOW(55)); + p.objectChanged(0); + //check entrie 8 bits since COW uses byte array for COW_ONCE bitmap + assertFalse(p.isCOW(0)); + 
assertTrue(p.isCOW(1)); + assertTrue(p.isCOW(2)); + assertTrue(p.isCOW(3)); + assertTrue(p.isCOW(4)); + assertTrue(p.isCOW(5)); + assertTrue(p.isCOW(6)); + assertTrue(p.isCOW(7)); + + p.objectChanged(5); + //check entrie 8 bits since COW uses byte array for COW_ONCE bitmap + assertFalse(p.isCOW(0)); + assertTrue(p.isCOW(1)); + assertTrue(p.isCOW(2)); + assertTrue(p.isCOW(3)); + assertTrue(p.isCOW(4)); + assertFalse(p.isCOW(5)); + assertTrue(p.isCOW(6)); + assertTrue(p.isCOW(7)); + + p.objectChanged(9); + //check entrie 8 bits since COW uses byte array for COW_ONCE bitmap + assertTrue(p.isCOW(8)); + assertFalse(p.isCOW(9)); + assertTrue(p.isCOW(10)); + assertTrue(p.isCOW(11)); + assertTrue(p.isCOW(12)); + assertTrue(p.isCOW(13)); + assertTrue(p.isCOW(14)); + assertTrue(p.isCOW(15)); + + //any new object (here > 114) must return false + assertFalse(p.isCOW(22556)); + p.objectChanged(22556); + assertFalse(p.isCOW(22556)); + } + +} diff --git a/servers/test/org/xtreemfs/test/osd/OSDDataIntegrityTest.java b/servers/test/org/xtreemfs/test/osd/OSDDataIntegrityTest.java new file mode 100644 index 0000000000000000000000000000000000000000..70839222aa243d7f5ee1feef9a97dd876770a50b --- /dev/null +++ b/servers/test/org/xtreemfs/test/osd/OSDDataIntegrityTest.java @@ -0,0 +1,394 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion and + Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. 
+ + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), + * Eugenio Cesario (CNR) + */ + +package org.xtreemfs.test.osd; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.clients.osd.OSDClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.common.striping.RAID0; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.dir.DIRConfig; +import org.xtreemfs.dir.RequestController; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.osd.OSD; +import org.xtreemfs.osd.OSDConfig; +import org.xtreemfs.test.SetupUtils; + +public class OSDDataIntegrityTest extends TestCase { + + private final ServiceUUID serverID; + + private final Locations loc; + + private final String fileId; + + private final Capability cap; + + private final OSDConfig osdConfig; + + private final DIRConfig dirConfig; + + private OSDClient osdClient; + + private DIRClient dirClient; + + private RequestController dir; + + private OSD osdServer; + + public OSDDataIntegrityTest(String testName) throws Exception { + super(testName); + + 
Logging.start(Logging.LEVEL_DEBUG); + + dirConfig = SetupUtils.createDIRConfig(); + osdConfig = SetupUtils.createOSD1Config(); + serverID = SetupUtils.getOSD1UUID(); + + fileId = "ABCDEF:1"; + cap = new Capability(fileId, "DebugCapability", 0, osdConfig.getCapabilitySecret()); + + List locations = new ArrayList(1); + StripingPolicy sp = new RAID0(2, 1); + List osd = new ArrayList(1); + osd.add(serverID); + locations.add(new Location(sp, osd)); + + loc = new Locations(locations); + } + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." + getName()); + + // cleanup + File testDir = new File(SetupUtils.TEST_DIR); + + FSUtils.delTree(testDir); + testDir.mkdirs(); + + dir = new org.xtreemfs.dir.RequestController(dirConfig); + dir.startup(); + + dirClient = SetupUtils.initTimeSync(); + + osdServer = new OSD(osdConfig); + + synchronized (this) { + try { + this.wait(50); + } catch (InterruptedException ex) { + ex.printStackTrace(); + } + } + + osdClient = SetupUtils.createOSDClient(10000); + + SetupUtils.setupLocalResolver(); + } + + protected void tearDown() throws Exception { + osdServer.shutdown(); + + if (dirClient != null) { + dirClient.shutdown(); + dirClient.waitForShutdown(); + } + + dir.shutdown(); + osdClient.shutdown(); + osdClient.waitForShutdown(); + } + + public void testWriteRanges() throws Exception { + + // test for obj 1,2,3... 
+ for (int objId = 0; objId < 5; objId++) { + // write half object + ReusableBuffer buf = BufferPool.allocate(1024); + for (int i = 0; i < 1024; i++) + buf.put((byte) 'A'); + RPCResponse r = osdClient.putWithForcedIncrement(serverID.getAddress(), loc, cap, fileId, + objId, 0, buf); + r.waitForResponse(); + + String newFileSize = r.getHeaders().getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + newFileSize = newFileSize.substring(1, newFileSize.indexOf(',')); + long newFS = Long.parseLong(newFileSize); + assertEquals(1024 + (objId) * 2048, newFS); + + r.freeBuffers(); + + // read data + r = osdClient.get(serverID.getAddress(), loc, cap, fileId, objId); + ReusableBuffer data = r.getBody(); + + data.position(0); + assertEquals(1024, data.capacity()); + for (int i = 0; i < 1024; i++) + assertEquals((byte) 'A', data.get()); + r.freeBuffers(); + + // write second half + buf = BufferPool.allocate(1024); + for (int i = 0; i < 1024; i++) + buf.put((byte) 'a'); + r = osdClient.put(serverID.getAddress(), loc, cap, fileId, objId, 1024, buf); + r.waitForResponse(); + r.freeBuffers(); + + // read data + r = osdClient.get(serverID.getAddress(), loc, cap, fileId, objId); + data = r.getBody(); + + data.position(0); + assertEquals(2048, data.capacity()); + for (int i = 0; i < 1024; i++) + assertEquals((byte) 'A', data.get()); + for (int i = 0; i < 1024; i++) + assertEquals((byte) 'a', data.get()); + r.freeBuffers(); + + // write somewhere in the middle + buf = BufferPool.allocate(1024); + for (int i = 0; i < 1024; i++) + buf.put((byte) 'x'); + r = osdClient.putWithForcedIncrement(serverID.getAddress(), loc, cap, fileId, objId, 512, + buf); + r.waitForResponse(); + r.freeBuffers(); + + // read data + r = osdClient.get(serverID.getAddress(), loc, cap, fileId, objId); + data = r.getBody(); + + data.position(0); + assertEquals(2048, data.capacity()); + for (int i = 0; i < 512; i++) + assertEquals((byte) 'A', data.get()); + for (int i = 0; i < 1024; i++) + assertEquals((byte) 'x', data.get()); + 
for (int i = 0; i < 512; i++) + assertEquals((byte) 'a', data.get()); + + r.freeBuffers(); + } + + } + + public void testReadRanges() throws Exception { + + // test for obj 1,2,3... + for (int objId = 0; objId < 5; objId++) { + // write half object + ReusableBuffer buf = BufferPool.allocate(2048); + for (int i = 0; i < 512; i++) + buf.put((byte) 'A'); + for (int i = 0; i < 512; i++) + buf.put((byte) 'B'); + for (int i = 0; i < 512; i++) + buf.put((byte) 'C'); + for (int i = 0; i < 512; i++) + buf.put((byte) 'D'); + RPCResponse r = osdClient.putWithForcedIncrement(serverID.getAddress(), loc, cap, fileId, + objId, 0, buf); + r.waitForResponse(); + r.freeBuffers(); + + // read data 1st 512 bytes + r = osdClient.get(serverID.getAddress(), loc, cap, fileId, objId, 0, 511); + ReusableBuffer data = r.getBody(); + + data.position(0); + assertEquals(512, data.capacity()); + for (int i = 0; i < 512; i++) + assertEquals((byte) 'A', data.get()); + + r.freeBuffers(); + + r = osdClient.get(serverID.getAddress(), loc, cap, fileId, objId, 1024, 1535); + data = r.getBody(); + + data.position(0); + assertEquals(512, data.capacity()); + for (int i = 0; i < 512; i++) + assertEquals((byte) 'C', data.get()); + + r.freeBuffers(); + } + } + + public void testImplicitTruncateWithinObject() throws Exception { + + // first test implicit truncate through write within a single object + ReusableBuffer buf = BufferPool.allocate(1024); + for (int i = 0; i < 1024; i++) + buf.put((byte) 'A'); + RPCResponse r = osdClient.putWithForcedIncrement(serverID.getAddress(), loc, cap, fileId, 0, + 1024, buf); + r.waitForResponse(); + String newFileSize = r.getHeaders().getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + newFileSize = newFileSize.substring(1, newFileSize.indexOf(',')); + long newFS = Long.parseLong(newFileSize); + assertEquals(2048, newFS); + r.freeBuffers(); + + // read data + r = osdClient.get(serverID.getAddress(), loc, cap, fileId, 0); + ReusableBuffer data = r.getBody(); + + data.position(0); + 
assertEquals(2048, data.capacity()); + for (int i = 0; i < 1024; i++) + assertEquals((byte) 0, data.get()); + for (int i = 0; i < 1024; i++) + assertEquals((byte) 'A', data.get()); + + r.freeBuffers(); + } + + public void testImplicitTruncate() throws Exception { + + // first test implicit truncate through write within a single object + ReusableBuffer buf = BufferPool.allocate(1024); + for (int i = 0; i < 1024; i++) + buf.put((byte) 'A'); + RPCResponse r = osdClient.putWithForcedIncrement(serverID.getAddress(), loc, cap, fileId, 1, + 1024, buf); + r.waitForResponse(); + r.freeBuffers(); + + // read data + r = osdClient.get(serverID.getAddress(), loc, cap, fileId, 0); + ReusableBuffer data = r.getBody(); + + data.position(0); + assertEquals(2048, data.capacity()); + for (int i = 0; i < 2048; i++) + assertEquals((byte) 0, data.get()); + + r.freeBuffers(); + + // read data + r = osdClient.get(serverID.getAddress(), loc, cap, fileId, 1); + data = r.getBody(); + + data.position(0); + assertEquals(2048, data.capacity()); + for (int i = 0; i < 1024; i++) + assertEquals((byte) 0, data.get()); + for (int i = 0; i < 1024; i++) + assertEquals((byte) 'A', data.get()); + + r.freeBuffers(); + } + + public void testEOF() throws Exception { + + // first test implicit truncate through write within a single object + ReusableBuffer buf = BufferPool.allocate(1023); + for (int i = 0; i < 1023; i++) + buf.put((byte) 'A'); + RPCResponse r = osdClient.putWithForcedIncrement(serverID.getAddress(), loc, cap, fileId, 1, + 1024, buf); + r.waitForResponse(); + r.freeBuffers(); + + // read data + r = osdClient.get(serverID.getAddress(), loc, cap, fileId, 1); + ReusableBuffer data = r.getBody(); + + data.position(0); + assertEquals(2047, data.capacity()); + for (int i = 0; i < 1024; i++) + assertEquals((byte) 0, data.get()); + for (int i = 0; i < 1023; i++) + assertEquals((byte) 'A', data.get()); + + r.freeBuffers(); + + // read non-existing object (EOF) + r = 
osdClient.get(serverID.getAddress(), loc, cap, fileId, 2); + data = r.getBody(); + assertNull(data); + r.freeBuffers(); + } + + public void testOverlappingWrites() throws Exception { + + // first test implicit truncate through write within a single object + ReusableBuffer buf = BufferPool.allocate(1024); + for (int i = 0; i < 1024; i++) + buf.put((byte) 'A'); + RPCResponse r = osdClient.putWithForcedIncrement(serverID.getAddress(), loc, cap, fileId, 1, + 0, buf); + r.waitForResponse(); + r.freeBuffers(); + + buf = BufferPool.allocate(1024); + for (int i = 0; i < 1024; i++) + buf.put((byte) 'B'); + r = osdClient.putWithForcedIncrement(serverID.getAddress(), loc, cap, fileId, 1, 512, buf); + r.waitForResponse(); + r.freeBuffers(); + + // read data + r = osdClient.get(serverID.getAddress(), loc, cap, fileId, 1); + ReusableBuffer data = r.getBody(); + + data.position(0); + assertEquals(1536, data.capacity()); + for (int i = 0; i < 512; i++) + assertEquals((byte) 'A', data.get()); + for (int i = 0; i < 1024; i++) + assertEquals((byte) 'B', data.get()); + + r.freeBuffers(); + + } + + public static void main(String[] args) { + TestRunner.run(OSDDataIntegrityTest.class); + } + +} diff --git a/servers/test/org/xtreemfs/test/osd/OSDTest.java b/servers/test/org/xtreemfs/test/osd/OSDTest.java new file mode 100644 index 0000000000000000000000000000000000000000..dc6127223dabf2fb8d1d580fb157e797e951bc7f --- /dev/null +++ b/servers/test/org/xtreemfs/test/osd/OSDTest.java @@ -0,0 +1,890 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion and + Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. 
+ + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), + * Eugenio Cesario (CNR) + */ + +package org.xtreemfs.test.osd; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.HttpErrorException; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.clients.osd.OSDClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.common.striping.RAID0; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.dir.DIRConfig; +import org.xtreemfs.dir.RequestController; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.osd.OSD; +import org.xtreemfs.osd.OSDConfig; +import org.xtreemfs.test.SetupUtils; + +/** + * Class for testing the NewOSD It uses the old OSDTest tests. 
It checks if the + * OSD works without replicas neither striping + * + * @author Jesus Malo (jmalo) + */ +public class OSDTest extends TestCase { + + private final ServiceUUID serverID; + + private final Locations loc; + + private final String file; + + private final long objectNumber; + + private final long stripeSize; + + private final DIRConfig dirConfig; + + private final OSDConfig osdConfig; + + private DIRClient dirClient; + + private OSDClient client; + + private RequestController dir; + + private OSD osd; + + private Capability cap; + + public OSDTest(String testName) throws Exception { + super(testName); + + Logging.start(Logging.LEVEL_DEBUG); + + dirConfig = SetupUtils.createDIRConfig(); + osdConfig = SetupUtils.createOSD1Config(); + + stripeSize = 1; + + // It sets the loc attribute + List locations = new ArrayList(1); + StripingPolicy sp = new RAID0(stripeSize, 1); + serverID = SetupUtils.getOSD1UUID(); + List osd = new ArrayList(1); + osd.add(serverID); + locations.add(new Location(sp, osd)); + loc = new Locations(locations); + + file = "1:1"; + objectNumber = 0; + + cap = new Capability(file, "DebugCapability", 0, osdConfig.getCapabilitySecret()); + } + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + + // cleanup + File testDir = new File(SetupUtils.TEST_DIR); + + FSUtils.delTree(testDir); + testDir.mkdirs(); + + dir = new RequestController(dirConfig); + dir.startup(); + + dirClient = SetupUtils.initTimeSync(); + + osd = new OSD(osdConfig); + client = SetupUtils.createOSDClient(10000); + } + + protected void tearDown() throws Exception { + osd.shutdown(); + dir.shutdown(); + + client.shutdown(); + client.waitForShutdown(); + + if (dirClient != null) { + dirClient.shutdown(); + dirClient.waitForShutdown(); + } + } + + /** + * It tests the get of a whole object + */ + public void testRightGET() throws Exception { + + final String content = "Hello World"; + RPCResponse tmp = client.put(serverID.getAddress(), loc, cap, file, objectNumber, + ReusableBuffer.wrap(content.getBytes())); + tmp.waitForResponse(); + tmp.freeBuffers(); + + RPCResponse answer = client.get(serverID.getAddress(), loc, cap, file, objectNumber); + answer.waitForResponse(); + + assertEquals(HTTPUtils.SC_OKAY, answer.getStatusCode()); + assertEquals(content, new String(answer.getBody().array())); + + answer.freeBuffers(); + + // check object; proper size should be returned + answer = client.checkObject(serverID.getAddress(), loc, cap, file, objectNumber); + assertEquals(String.valueOf(content.length()), answer.get().toString()); + + answer.freeBuffers(); + } + + /** + * It tests the get of a range of bytes + */ + public void testRangeGET() throws Exception { + + final String content = "Hello World"; + RPCResponse tmp = client.put(serverID.getAddress(), loc, cap, file, objectNumber, + ReusableBuffer.wrap(content.getBytes())); + tmp.waitForResponse(); + tmp.freeBuffers(); + + final int firstByte = 2; + final int lastByte = 9; + + RPCResponse answer = client.get(serverID.getAddress(), loc, cap, file, objectNumber, + firstByte, lastByte); + answer.waitForResponse(); + + assertEquals(HTTPUtils.SC_OKAY, answer.getStatusCode()); + assertEquals(content.substring(firstByte, lastByte + 1), 
new String(answer.getBody() + .array())); + + answer.freeBuffers(); + } + + /** + * It tests the empty get + */ + public void testEmptyGET() throws Exception { + + // create a new file + RPCResponse answer = client.put(serverID.getAddress(), loc, cap, file, objectNumber, + ReusableBuffer.wrap("Hello World".getBytes())); + answer.waitForResponse(); + answer.freeBuffers(); + + cap = new Capability(file, "", 1, osdConfig.getCapabilitySecret()); + + // truncate the file to zero length + answer = client.truncate(serverID.getAddress(), loc, cap, file, 0); + answer.waitForResponse(); + answer.freeBuffers(); + + // get the file content + answer = client.get(serverID.getAddress(), loc, cap, file); + answer.waitForResponse(); + + assertEquals(HTTPUtils.SC_OKAY, answer.getStatusCode()); + assertNull(answer.getBody()); + + answer.freeBuffers(); + } + + /** + * It tests the put of a range + */ + public void testRangePUT() throws Exception { + + final int tamData = 4; + byte[] data = new byte[tamData]; + final int firstByte = 0; + byte[] readByte; + RPCResponse answer; + HTTPHeaders headers; + String newFileSize; + + // It tests the first put. The response will have a new file size header + answer = client.put(serverID.getAddress(), loc, cap, file, objectNumber, firstByte, + ReusableBuffer.wrap(data)); + answer.waitForResponse(); + + headers = answer.getHeaders(); + newFileSize = headers.getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + assertNotNull(newFileSize); + newFileSize = newFileSize.substring(1, newFileSize.indexOf(',')); + assertEquals(tamData, Integer.parseInt(newFileSize)); + answer.freeBuffers(); + + answer = client.get(serverID.getAddress(), loc, cap, file, objectNumber); + answer.waitForResponse(); + readByte = answer.getSpeedyRequest().getResponseBody(); + assertEquals(tamData, readByte.length); + answer.freeBuffers(); + + // It tests a second put, cloned to the previous one. 
The response won't + // have a new file size header + answer = client.put(serverID.getAddress(), loc, cap, file, objectNumber, firstByte, + ReusableBuffer.wrap(data)); + answer.waitForResponse(); + headers = answer.getHeaders(); + newFileSize = headers.getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + assertNull(newFileSize); + answer.freeBuffers(); + + answer = client.get(serverID.getAddress(), loc, cap, file, objectNumber); + answer.waitForResponse(); + readByte = answer.getSpeedyRequest().getResponseBody(); + assertNotNull(readByte); + assertEquals(tamData, readByte.length); + answer.freeBuffers(); + } + + /** + * It test the put of a whole object. + */ + public void testObjectPUT() throws Exception { + + final int tamData = 1; + byte[] data = new byte[tamData]; + + RPCResponse answer = client.put(serverID.getAddress(), loc, cap, file, objectNumber, + ReusableBuffer.wrap(data)); + answer.waitForResponse(); + HTTPHeaders headers = answer.getHeaders(); + String newFileSize = headers.getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + assertNotNull(newFileSize); + newFileSize = newFileSize.substring(1, newFileSize.indexOf(',')); + assertNotNull(newFileSize); + assertEquals(tamData, Integer.parseInt(newFileSize)); + answer.freeBuffers(); + + answer = client.get(serverID.getAddress(), loc, cap, file, objectNumber); + answer.waitForResponse(); + byte[] readByte = answer.getSpeedyRequest().getResponseBody(); + assertNotNull(readByte); + assertEquals(tamData, readByte.length); + answer.freeBuffers(); + } + + /** + * It tests the deletion of a non-existent file + */ + public void testWrongDELETE() throws Exception { + + // Test + RPCResponse answer = client.delete(serverID.getAddress(), loc, cap, file); + try { + answer.waitForResponse(); + fail("got OK, should have got NOT FOUND"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + answer.freeBuffers(); + } + } + + /** + * It tests the deletion of one existing file + */ + public 
void testObjectDELETE() throws Exception { + + final String content = "Hello World"; + RPCResponse tmp = client.put(serverID.getAddress(), loc, cap, file, objectNumber, + ReusableBuffer.wrap(content.getBytes())); + tmp.waitForResponse(); + tmp.freeBuffers(); + + RPCResponse answer = client.delete(serverID.getAddress(), loc, cap, file); + answer.waitForResponse(); + answer.freeBuffers(); + + // NOTE: Due to the POSIX-compliant delete-on-close semantics, it is not + // possible to draw any conclusions whether the objects have been + // deleted yet or not. A file will not be regarded as closed by an OSD + // as long as it still knows of a capability for the file that has not + // yet expired. + + // try { + // // file should not exist anymore + // answer = client.get(serverID.getAddress(), loc, cap, file); + // answer.waitForResponse(); + // fail(); + // } catch (Exception ex) { + // answer.freeBuffers(); + // } + } + + /** + * It tests the OSD when receives a x-Location where it isn't include + */ + public void testNoOSDLocation() throws Exception { + + List locations = new ArrayList(1); + StripingPolicy sp = new RAID0(1, 1); + List osd = new ArrayList(1); + osd.add(new ServiceUUID("www.google.com:80")); + locations.add(new Location(sp, osd)); + Locations noOSDLoc = new Locations(locations); + + long objectNumber = 0; + final int tamData = 1; + byte[] data = new byte[tamData]; + + // Test + RPCResponse answerGET = null; + try { + answerGET = client.get(serverID.getAddress(), noOSDLoc, cap, file); + answerGET.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + answerGET.freeBuffers(); + } + + RPCResponse answerPUT = null; + try { + answerPUT = client.put(serverID.getAddress(), noOSDLoc, cap, file, objectNumber, + ReusableBuffer.wrap(data)); + answerPUT.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + 
assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + answerPUT.freeBuffers(); + } + + RPCResponse answerDELETE = null; + try { + answerDELETE = client.delete(serverID.getAddress(), noOSDLoc, cap, file); + answerDELETE.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + answerDELETE.freeBuffers(); + } + } + + /** + * It tests the OSD when receives a request for an object which the OSD + * isn't responsible for + */ + public void testNoOSDResponsible() throws Exception { + + List locations = new ArrayList(1); + StripingPolicy sp = new RAID0(1, 2); + List osd = new ArrayList(1); + osd.add(serverID); + osd.add(new ServiceUUID("www.google.com")); + locations.add(new Location(sp, osd)); + Locations noOSDLoc = new Locations(locations); + + long objectNumber = 1; + + // Test + RPCResponse answerGET = null; + try { + answerGET = client.get(serverID.getAddress(), noOSDLoc, cap, file, objectNumber); + answerGET.waitForResponse(); + fail("got 200, should have got 500"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + answerGET.freeBuffers(); + } + + } + + /** + * It tests if wrong GET requests with low level errors are correctly + * processed + */ + public void testLowLevelGetRequests() throws Exception { + + String token = "GET"; + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber)); + headers.addHeader(HTTPHeaders.HDR_CONTENT_LENGTH, Long.toString(0)); + + RPCResponse answer = null; + + // Bad Content-Range tests + // Empty string + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, headers); + 
answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Totally wrong string + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("qwerty1234567890")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Incomplete string + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Incomplete string + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes 0")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Incomplete string + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes 0-")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Incomplete string + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes 0-0")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, 
headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Incomplete string + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes 0-0/")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Wrong string at end + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes 0-0/1")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Wrong string at end + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes 0-0/1*")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Wrong string: there are no blank + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes0-0/*")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Semantically wrong string + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes 1-0/*")); + answer = 
client.sendRPC(serverID.getAddress(), token, null, file, headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Wrong string + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes A-/*")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Wrong string + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes $-$/*")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Wrong string + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes 1.1-4/*")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + + // Semantically wrong string with no limits values + try { + headers.setHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes 4321-1234/*")); + answer = client.sendRPC(serverID.getAddress(), token, null, file, headers); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + } finally { + answer.freeBuffers(); + } + } + + /** + * It tests the truncate method + */ + public void testTruncate() throws 
Exception { + + final byte[] data = new String("abcdefghij").getBytes(); + final Long testSize = new Long(data.length); + + final String badFile = "2:2"; + + // Testing + RPCResponse answer = null; + + // Bad location -> Inexistent OSD + { + List locations = new ArrayList(1); + StripingPolicy sp = new RAID0(1, 1); + ServiceUUID google = new ServiceUUID("www.google.com:80"); + List osd2 = new ArrayList(1); + osd2.add(google); + locations.add(new Location(sp, osd2)); + Locations noOSDLoc = new Locations(locations); + + try { + answer = client.truncate(serverID.getAddress(), noOSDLoc, cap, badFile, testSize); + answer.waitForResponse(); + fail("got 200, should have got 420"); + } catch (HttpErrorException exc) { + assertEquals(HTTPUtils.SC_USER_EXCEPTION, exc.getStatusCode()); + answer.freeBuffers(); + } + } + + // It generates a file of testSize bytes which will be truncated several + // times + Logging.logMessage(Logging.LEVEL_DEBUG, this, loc.toString()); + answer = client.put(serverID.getAddress(), loc, cap, file, objectNumber, ReusableBuffer + .wrap(data)); + answer.waitForResponse(); + answer.freeBuffers(); + + // Test sizes + Long[] testSizes = { new Long(testSize + 1), new Long(testSize), new Long(2 * testSize), + new Long(0) }; + int epoch = 0; + for (Long size : testSizes) { + epoch++; + + Capability newCap = new Capability(file, "DebugCapability", epoch, osdConfig + .getCapabilitySecret()); + answer = client.truncate(serverID.getAddress(), loc, newCap, file, size); + answer.waitForResponse(); + String newFileSize = answer.getHeaders().getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + assertNotNull(newFileSize); + newFileSize = newFileSize.substring(1, newFileSize.indexOf(',')); + assertEquals(size, new Long(newFileSize)); + answer.freeBuffers(); + + answer = client.get(serverID.getAddress(), loc, cap, file); + answer.waitForResponse(); + answer.freeBuffers(); + } + } + + /** + * It tests truncate mixed with read and write operations. 
+ */ + public void testTruncateWithPutAndGet() throws Exception { + + final byte[] data = new String("abcdefghij").getBytes(); + final long kb = 1024 * stripeSize; + + final long[] testSizes = { 0, 1, 2, kb - 1, kb, kb + 1, 2 * kb - 1, 2 * kb, 2 * kb + 1 }; + + int epoch = 0; + Capability oldCap = cap; + for (long i : testSizes) { + epoch++; + + Capability newCap = new Capability(file, "DebugCapability", epoch, osdConfig + .getCapabilitySecret()); + + // append data to the file + RPCResponse answer = client.put(serverID.getAddress(), loc, oldCap, file, objectNumber, + ReusableBuffer.wrap(data)); + answer.waitForResponse(); + + HTTPHeaders headers = answer.getHeaders(); + String newFileSize = headers.getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + assertNotNull(newFileSize); + newFileSize = newFileSize.substring(1, newFileSize.indexOf(',')); + assertEquals(data.length, Integer.parseInt(newFileSize)); + answer.freeBuffers(); + + // truncate the file + RPCResponse answer2 = client.truncate(serverID.getAddress(), loc, newCap, file, i); + answer2.waitForResponse(); + HTTPHeaders headers2 = answer2.getHeaders(); + newFileSize = headers2.getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + assertNotNull(newFileSize); + newFileSize = newFileSize.substring(1, newFileSize.indexOf(',')); + assertEquals(i, Long.parseLong(newFileSize)); + answer2.freeBuffers(); + + // read the last byte + if (i > 0) { + RPCResponse answer3 = client.get(serverID.getAddress(), loc, cap, file, (i - 1) + / kb, (i - 1) % kb, (i - 1) % kb); + answer3.waitForResponse(); + HTTPHeaders headers3 = answer3.getHeaders(); + String gotFileSize = headers3.getHeader(HTTPHeaders.HDR_CONTENT_LENGTH); + assertNotNull(gotFileSize); + assertEquals(1, Long.parseLong(gotFileSize)); + answer3.freeBuffers(); + } + + // truncate the file to zero length + newCap = new Capability(file, "DebugCapability", ++epoch, osdConfig + .getCapabilitySecret()); + RPCResponse answer4 = client.truncate(serverID.getAddress(), loc, newCap, file, 0); + 
answer4.waitForResponse(); + answer4.freeBuffers(); + + oldCap = newCap; + } + } + + /** + * It tests the operations over files with holes + */ + public void testHoles() throws Exception { + + long epoch = 0; + + final byte[] data = new String("abcdefghij").getBytes(); + final long kb = 1024 * stripeSize; + + // Write object 1 and read object 0 + { + RPCResponse answerW = client.put(serverID.getAddress(), loc, cap, file, 1, + ReusableBuffer.wrap(data)); + answerW.waitForResponse(); + assertEquals(HTTPUtils.SC_OKAY, answerW.getStatusCode()); + HTTPHeaders headersW = answerW.getHeaders(); + String newFileSize = headersW.getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + assertNotNull(newFileSize); + newFileSize = newFileSize.substring(1, newFileSize.indexOf(',')); + assertEquals(kb + data.length, Integer.parseInt(newFileSize)); + answerW.freeBuffers(); + + RPCResponse answerR = client.get(serverID.getAddress(), loc, cap, file, 0); + answerR.waitForResponse(); + HTTPHeaders headersR = answerR.getHeaders(); + String dataSize = headersR.getHeader(HTTPHeaders.HDR_CONTENT_LENGTH); + assertEquals(HTTPUtils.SC_OKAY, answerR.getStatusCode()); + assertNotNull(dataSize); + assertEquals(kb, Long.parseLong(dataSize)); + answerR.freeBuffers(); + + // @todo Check the data zeroed + + // truncate the file to zero length + cap = new Capability(file, "DebugCapability", ++epoch, osdConfig.getCapabilitySecret()); + RPCResponse answerT = client.truncate(serverID.getAddress(), loc, cap, file, 0); + answerT.waitForResponse(); + answerT.freeBuffers(); + } + + // Write objecs 0 and 2 and read object 1 + { + RPCResponse answerW = client.put(serverID.getAddress(), loc, cap, file, 0, + ReusableBuffer.wrap(data)); + answerW.waitForResponse(); + assertEquals(HTTPUtils.SC_OKAY, answerW.getStatusCode()); + HTTPHeaders headersW = answerW.getHeaders(); + String newFileSize = headersW.getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + assertNotNull(newFileSize); + newFileSize = newFileSize.substring(1, 
newFileSize.indexOf(',')); + assertEquals(data.length, Integer.parseInt(newFileSize)); + answerW.freeBuffers(); + + RPCResponse answerW2 = client.put(serverID.getAddress(), loc, cap, file, 2, + ReusableBuffer.wrap(data)); + answerW2.waitForResponse(); + assertEquals(HTTPUtils.SC_OKAY, answerW2.getStatusCode()); + HTTPHeaders headersW2 = answerW2.getHeaders(); + String newFileSize2 = headersW2.getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + assertNotNull(newFileSize2); + newFileSize2 = newFileSize2.substring(1, newFileSize2.indexOf(',')); + assertEquals(2 * kb + data.length, Integer.parseInt(newFileSize2)); + answerW2.freeBuffers(); + + RPCResponse answerR = client.get(serverID.getAddress(), loc, cap, file, 1); + answerR.waitForResponse(); + HTTPHeaders headersR = answerR.getHeaders(); + String dataSize = headersR.getHeader(HTTPHeaders.HDR_CONTENT_LENGTH); + assertEquals(HTTPUtils.SC_OKAY, answerR.getStatusCode()); + assertNotNull(dataSize); + assertEquals(kb, Long.parseLong(dataSize)); + answerR.freeBuffers(); + + // @todo Check the data zeroed + + // truncate the file to zero length + cap = new Capability(file, "DebugCapability", ++epoch, osdConfig.getCapabilitySecret()); + RPCResponse answerT = client.truncate(serverID.getAddress(), loc, cap, file, 0); + answerT.waitForResponse(); + answerT.freeBuffers(); + } + + // Write objecs 0 and truncate extending to object 1 and read object 1. 
+ // It also tries to read object 2, but it should fail + { + RPCResponse answerW = client.put(serverID.getAddress(), loc, cap, file, 0, + ReusableBuffer.wrap(data)); + answerW.waitForResponse(); + assertEquals(HTTPUtils.SC_OKAY, answerW.getStatusCode()); + HTTPHeaders headersW = answerW.getHeaders(); + String newFileSize = headersW.getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + assertNotNull(newFileSize); + newFileSize = newFileSize.substring(1, newFileSize.indexOf(',')); + assertEquals(data.length, Integer.parseInt(newFileSize)); + answerW.freeBuffers(); + + cap = new Capability(file, "DebugCapability", ++epoch, osdConfig.getCapabilitySecret()); + RPCResponse answerT = client.truncate(serverID.getAddress(), loc, cap, file, 2 * kb); + answerT.waitForResponse(); + assertEquals(HTTPUtils.SC_OKAY, answerT.getStatusCode()); + HTTPHeaders headersT = answerT.getHeaders(); + String newFileSizeT = headersT.getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + assertNotNull(newFileSizeT); + newFileSizeT = newFileSizeT.substring(1, newFileSizeT.indexOf(',')); + assertEquals(2 * kb, Long.parseLong(newFileSizeT)); + answerT.freeBuffers(); + + RPCResponse answerR = client.get(serverID.getAddress(), loc, cap, file, 1); + answerR.waitForResponse(); + HTTPHeaders headersR = answerR.getHeaders(); + String dataSize = headersR.getHeader(HTTPHeaders.HDR_CONTENT_LENGTH); + assertEquals(HTTPUtils.SC_OKAY, answerR.getStatusCode()); + assertNotNull(dataSize); + assertEquals(kb, Long.parseLong(dataSize)); + answerR.freeBuffers(); + + RPCResponse answerR2 = client.get(serverID.getAddress(), loc, cap, file, 2); + answerR2.waitForResponse(); + HTTPHeaders headersR2 = answerR2.getHeaders(); + String dataSize2 = headersR2.getHeader(HTTPHeaders.HDR_CONTENT_LENGTH); + assertEquals(HTTPUtils.SC_OKAY, answerR2.getStatusCode()); + assertNotNull(dataSize2); + assertEquals(0, Long.parseLong(dataSize2)); + answerR2.freeBuffers(); + + // @todo Check the data zeroed + + // truncate the file to zero length + cap = new 
Capability(file, "DebugCapability", ++epoch, osdConfig.getCapabilitySecret()); + RPCResponse answerTr = client.truncate(serverID.getAddress(), loc, cap, file, 0); + answerTr.waitForResponse(); + answerTr.freeBuffers(); + } + } + + public static void main(String[] args) { + TestRunner.run(OSDTest.class); + } +} diff --git a/servers/test/org/xtreemfs/test/osd/ParserStageTest.java b/servers/test/org/xtreemfs/test/osd/ParserStageTest.java new file mode 100644 index 0000000000000000000000000000000000000000..60efe17c5ffdd8b955b1bd2d0ad8b06d4676063e --- /dev/null +++ b/servers/test/org/xtreemfs/test/osd/ParserStageTest.java @@ -0,0 +1,954 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion and + Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . 
+ */ +/* + * AUTHORS: Jan Stender (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), + * Eugenio Cesario (CNR) + */ + +package org.xtreemfs.test.osd; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.Request; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.common.striping.RAID0; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.common.uuids.UUIDResolver; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.json.JSONString; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.osd.ErrorCodes; +import org.xtreemfs.osd.OSDConfig; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.ErrorRecord.ErrorClass; +import org.xtreemfs.osd.ops.Operation; +import org.xtreemfs.osd.stages.ParserStage; +import org.xtreemfs.osd.stages.Stage; +import org.xtreemfs.osd.stages.StageStatistics; +import org.xtreemfs.test.SetupUtils; + +/** + * This class implements the tests for the ParserStage + * + * @author jmalo + */ +public class ParserStageTest extends TestCase { + + ServiceUUID osdId; + + ServiceUUID wrongOSDId, unavailOSD; + + private final long stripeSize; + + private Locations loc; + + private Locations wrongLoc; + + private Capability cap; + + private String file; + + ParserStage stage; + + TestRequestDispatcher master; + + boolean finished; + + 
/** Creates a new instance of ParserStageTest */ + public ParserStageTest(String testName) throws Exception { + super(testName); + + Logging.start(Logging.LEVEL_DEBUG); + + OSDConfig config = SetupUtils.createOSD1Config(); + master = new TestRequestDispatcher(config); + + osdId = config.getUUID(); + + stripeSize = 1; + + wrongOSDId = new ServiceUUID("www.google.es"); + + unavailOSD = new ServiceUUID("osdX-uuid"); + + stage = new ParserStage(master); + + // It sets the always required objects + file = new String("1:1"); + + List locations = new ArrayList(1); + List wrongLocations = new ArrayList(1); + StripingPolicy sp1 = new RAID0(stripeSize, 1); + StripingPolicy sp2 = new RAID0(stripeSize, 2); + List osd = new ArrayList(1); + List wrongOsd = new ArrayList(1); + osd.add(osdId); + osd.add(unavailOSD); + wrongOsd.add(wrongOSDId); + locations.add(new Location(sp2, osd)); + wrongLocations.add(new Location(sp1, wrongOsd)); + loc = new Locations(locations); + wrongLoc = new Locations(wrongLocations); + + cap = new Capability(file, "read", 0, config.getCapabilitySecret()); + } + + protected void setUp() throws Exception { + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + stage.start(); + + SetupUtils.setupLocalResolver(); + UUIDResolver.addLocalMapping("osdX-uuid", 45454, SetupUtils.SSL_ON); + } + + protected void tearDown() throws Exception { + stage.shutdown(); + stage.waitForShutdown(); + } + + public void testParseGetRequest() throws Exception { + + OSDRequest rq = new OSDRequest(0); + HTTPHeaders headers = new HTTPHeaders(); + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, "[[], 1,\"lazy\"]"); + String uri = null; + if (file != null) + uri = file; + rq.setPinkyRequest(new PinkyRequest(HTTPUtils.GET_TOKEN, uri, null, headers)); + + finished = false; + stage.enqueueOperation(rq, ParserStage.STAGEOP_PARSE, null); + synchronized (this) { + while (!finished) + wait(); + } + + // expected: X-Loc cache miss + assertEquals(ErrorClass.USER_EXCEPTION, rq.getError().getErrorClass()); + assertEquals(ErrorCodes.NEED_FULL_XLOC, rq.getError().getErrorCode()); + + rq = new OSDRequest(0); + rq.setPinkyRequest(generateGetRequest(loc, cap, file)); + + finished = false; + stage.enqueueOperation(rq, ParserStage.STAGEOP_PARSE, null); + synchronized (this) { + while (!finished) + wait(); + } + + // expected: parsing successful + System.out.println(rq.getError()); + assertNull(rq.getError()); + assertEquals(OSDRequest.Type.READ, rq.getType()); + assertEquals(file, rq.getDetails().getFileId()); + assertEquals(loc, rq.getDetails().getLocationList()); + assertEquals(cap.toString(), rq.getDetails().getCapability().toString()); + assertEquals(loc.getLocation(osdId), rq.getDetails().getCurrentReplica()); + assertEquals(loc.getLocation(osdId).getStripingPolicy(), rq.getDetails() + .getCurrentReplica().getStripingPolicy()); + + // Wrong use cases + List erroneousRequests = new ArrayList(); + erroneousRequests.add(generateGetRequest(loc, cap, null)); + erroneousRequests.add(generateGetRequest(wrongLoc, cap, file + "4")); + + checkFailureCases(erroneousRequests); + } + + // /** + 
// * It tests the parsing of ReadWhole requests + // */ + // public void testReadWhole() throws Exception { + // + // long objectNumber = 0; + // + // // Right use case + // { + // Request rq = new Request(generateReadWholeRequest(loc + // .getSummarized(), cap, file, objectNumber)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.X_LOC_CACHEMISS, rq.getStatus()); + // + // rq = new Request(generateReadWholeRequest(loc, cap, file, + // objectNumber)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.PARSED, rq.getStatus()); + // assertTrue(rq.getOperation() instanceof ReadWhole); + // assertEquals(OperationType.READ, rq.getOSDOperation().getOpType()); + // assertEquals(OperationSubType.WHOLE, rq.getOSDOperation() + // .getOpSubType()); + // assertEquals(file, rq.getFileId()); + // assertEquals(loc, rq.getLocations()); + // assertEquals(cap.toString(), rq.getCapability().toString()); + // assertEquals(loc.getLocation(osdId), rq.getLocation()); + // assertEquals(loc.getLocation(osdId).getStripingPolicy(), rq + // .getPolicy()); + // assertEquals(objectNumber, rq.getObjectNo()); + // } + // + // // Wrong use cases + // List wrongOnes = new ArrayList(); + // wrongOnes.add(generateReadWholeRequest(loc, cap, null, objectNumber)); + // wrongOnes.add(generateReadWholeRequest(loc, null, file + "2", + // objectNumber)); + // wrongOnes.add(generateReadWholeRequest(null, cap, file + "3", + // objectNumber)); + // wrongOnes.add(generateReadWholeRequest(wrongLoc, cap, file + "4", + // objectNumber)); + // wrongOnes.add(generateReadWholeRequest(loc, cap, file + "5", + // objectNumber + 1)); + // + // checkWrongCases(wrongOnes); + // } + // + // /** + // * It tests the parsing of ReadRange requests + // */ + // public void testReadRange() throws Exception { + // + // long objectNumber = 0; + // long[] range = { 0, 1 }; + // + // // Right use case + // { + // Request rq = new 
Request(generateReadRangeRequest(loc + // .getSummarized(), cap, file, objectNumber, range)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.X_LOC_CACHEMISS, rq.getStatus()); + // + // rq = new Request(generateReadRangeRequest(loc, cap, file, + // objectNumber, range)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.PARSED, rq.getStatus()); + // assertTrue(rq.getOperation() instanceof ReadRange); + // assertEquals(OperationType.READ, rq.getOSDOperation().getOpType()); + // assertEquals(OperationSubType.RANGE, rq.getOSDOperation() + // .getOpSubType()); + // assertEquals(file, rq.getFileId()); + // assertEquals(loc, rq.getLocations()); + // assertEquals(cap.toString(), rq.getCapability().toString()); + // assertEquals(loc.getLocation(osdId), rq.getLocation()); + // assertEquals(loc.getLocation(osdId).getStripingPolicy(), rq + // .getPolicy()); + // assertEquals(objectNumber, rq.getObjectNo()); + // assertEquals(range[0], rq.getByteRange()[0]); + // assertEquals(range[1], rq.getByteRange()[1]); + // } + // + // // Wrong use cases + // List wrongOnes = new ArrayList(); + // wrongOnes.add(generateReadRangeRequest(loc, cap, null, objectNumber, + // range)); + // wrongOnes.add(generateReadRangeRequest(loc, null, file + "2", + // objectNumber, range)); + // wrongOnes.add(generateReadRangeRequest(null, cap, file + "3", + // objectNumber, range)); + // wrongOnes.add(generateReadRangeRequest(wrongLoc, cap, file + "4", + // objectNumber, range)); + // wrongOnes.add(generateReadRangeRequest(loc, cap, file + "5", + // objectNumber + 1, range)); + // long[][] wrongRanges = { { 1, 0 }, { -1, 0 }, { 0, -1 }, { -1, -1 }, + // { 0, 1 + Integer.MAX_VALUE } }; + // for (long[] w : wrongRanges) { + // wrongOnes.add(generateReadRangeRequest(loc, cap, file + "6", + // objectNumber, w)); + // } + // + // checkWrongCases(wrongOnes); + // } + // + // /** + // * It tests the parsing of 
WriteWhole requests + // */ + // public void testWriteWhole() throws Exception { + // + // long objectNumber = 0; + // byte[] data = new byte[2]; + // data[0] = 'A'; + // data[1] = 'Z'; + // + // // Right use case + // { + // Request rq = new Request(generateWriteWholeRequest(loc + // .getSummarized(), cap, file, objectNumber, data)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.X_LOC_CACHEMISS, rq.getStatus()); + // + // rq = new Request(generateWriteWholeRequest(loc, cap, file, + // objectNumber, data)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.PARSED, rq.getStatus()); + // assertTrue(rq.getOperation() instanceof WriteWhole); + // assertEquals(OperationType.WRITE, rq.getOSDOperation().getOpType()); + // assertEquals(OperationSubType.WHOLE, rq.getOSDOperation() + // .getOpSubType()); + // assertEquals(file, rq.getFileId()); + // assertEquals(loc, rq.getLocations()); + // assertEquals(cap.toString(), rq.getCapability().toString()); + // assertEquals(loc.getLocation(osdId), rq.getLocation()); + // assertEquals(loc.getLocation(osdId).getStripingPolicy(), rq + // .getPolicy()); + // assertEquals(objectNumber, rq.getObjectNo()); + // } + // + // // Wrong use cases + // List wrongOnes = new ArrayList(); + // wrongOnes.add(generateWriteWholeRequest(loc, cap, null, objectNumber, + // data)); + // wrongOnes.add(generateWriteWholeRequest(loc, null, file + "2", + // objectNumber, data)); + // wrongOnes.add(generateWriteWholeRequest(null, cap, file + "3", + // objectNumber, data)); + // wrongOnes.add(generateWriteWholeRequest(loc, cap, file + "4", null, + // data)); + // wrongOnes.add(generateWriteWholeRequest(loc, cap, file + "5", + // objectNumber, null)); + // wrongOnes.add(generateWriteWholeRequest(wrongLoc, cap, file + "6", + // objectNumber, data)); + // wrongOnes.add(generateWriteWholeRequest(loc, cap, file + "7", + // objectNumber + 1, data)); + // + // 
checkWrongCases(wrongOnes); + // } + // + // /** + // * It tests the parsing of WriteRange requests + // */ + // public void testWriteRange() throws Exception { + // + // long objectNumber = 0; + // long[] range = { 0, 1 }; + // byte[] data = new byte[2]; + // data[0] = 'A'; + // data[1] = 'Z'; + // + // // Right use case + // { + // Request rq = new Request(generateWriteRangeRequest(loc + // .getSummarized(), cap, file, objectNumber, range, data)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.X_LOC_CACHEMISS, rq.getStatus()); + // + // rq = new Request(generateWriteRangeRequest(loc, cap, file, + // objectNumber, range, data)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.PARSED, rq.getStatus()); + // assertTrue(rq.getOperation() instanceof WriteRange); + // assertEquals(OperationType.WRITE, rq.getOSDOperation().getOpType()); + // assertEquals(OperationSubType.RANGE, rq.getOSDOperation() + // .getOpSubType()); + // assertEquals(file, rq.getFileId()); + // assertEquals(loc, rq.getLocations()); + // assertEquals(cap.toString(), rq.getCapability().toString()); + // assertEquals(loc.getLocation(osdId), rq.getLocation()); + // assertEquals(loc.getLocation(osdId).getStripingPolicy(), rq + // .getPolicy()); + // assertEquals(objectNumber, rq.getObjectNo()); + // assertEquals(range[0], rq.getByteRange()[0]); + // assertEquals(range[1], rq.getByteRange()[1]); + // } + // + // // Wrong use cases + // List wrongOnes = new ArrayList(); + // wrongOnes.add(generateWriteRangeRequest(loc, cap, null, objectNumber, + // range, data)); + // wrongOnes.add(generateWriteRangeRequest(loc, null, file + "2", + // objectNumber, range, data)); + // wrongOnes.add(generateWriteRangeRequest(null, cap, file + "3", + // objectNumber, range, data)); + // wrongOnes.add(generateWriteRangeRequest(loc, cap, file + "4", null, + // range, data)); + // wrongOnes.add(generateWriteRangeRequest(loc, 
cap, file + "5", + // objectNumber, range, null)); + // wrongOnes.add(generateWriteRangeRequest(wrongLoc, cap, file + "6", + // objectNumber, range, data)); + // wrongOnes.add(generateWriteRangeRequest(loc, cap, file + "7", + // objectNumber + 1, range, data)); + // long[][] wrongRanges = { { 1, 0 }, { -1, 0 }, { 0, -1 }, { -1, -1 }, + // { 0, 1 + Integer.MAX_VALUE } }; + // for (long[] w : wrongRanges) { + // wrongOnes.add(generateWriteRangeRequest(loc, cap, file + "8", + // objectNumber, w, data)); + // } + // + // checkWrongCases(wrongOnes); + // } + // + // /** + // * It tests the parsing of DeleteWhole requests + // */ + // public void testDeleteWhole() throws Exception { + // + // // Right use case + // { + // Request rq = new Request(generateDeleteWholeRequest(loc + // .getSummarized(), cap, file)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.X_LOC_CACHEMISS, rq.getStatus()); + // + // rq = new Request(generateDeleteWholeRequest(loc, cap, file)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.PARSED, rq.getStatus()); + // assertTrue(rq.getOperation() instanceof DeleteWhole); + // assertEquals(OperationType.DELETE, rq.getOSDOperation().getOpType()); + // assertEquals(OperationSubType.WHOLE, rq.getOSDOperation() + // .getOpSubType()); + // assertEquals(file, rq.getFileId()); + // assertEquals(loc, rq.getLocations()); + // assertEquals(cap.toString(), rq.getCapability().toString()); + // assertEquals(loc.getLocation(osdId), rq.getLocation()); + // assertEquals(loc.getLocation(osdId).getStripingPolicy(), rq + // .getPolicy()); + // } + // + // // Wrong use cases + // List wrongOnes = new ArrayList(); + // wrongOnes.add(generateDeleteWholeRequest(loc, cap, null)); + // wrongOnes.add(generateDeleteWholeRequest(loc, null, file + "2")); + // wrongOnes.add(generateDeleteWholeRequest(null, cap, file + "3")); + // 
wrongOnes.add(generateDeleteWholeRequest(wrongLoc, cap, file + "4")); + // + // checkWrongCases(wrongOnes); + // } + // + // /** + // * It tests the parsing of RPCFileSize requests + // */ + // public void testFetchGlobalMax() throws Exception { + // + // // Right use case + // { + // Request rq = new Request(generateRPCGlobalMaxRequest(loc + // .getSummarized(), cap, file)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.X_LOC_CACHEMISS, rq.getStatus()); + // + // rq = new Request(generateRPCGlobalMaxRequest(loc, cap, file)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.PARSED, rq.getStatus()); + // assertTrue(rq.getOperation() instanceof RPCFileSize); + // assertEquals(OperationType.RPC, rq.getOSDOperation().getOpType()); + // assertEquals(OperationSubType.FETCH_GLOBAL_MAX, rq + // .getOSDOperation().getOpSubType()); + // assertEquals(cap.toString(), rq.getCapability().toString()); + // } + // + // // Wrong use cases + // List wrongOnes = new ArrayList(); + // wrongOnes.add(generateRPCGlobalMaxRequest(null, cap, file + "2")); + // wrongOnes.add(generateRPCGlobalMaxRequest(loc, null, file + "3")); + // wrongOnes.add(generateRPCGlobalMaxRequest(loc, cap, null)); + // wrongOnes.add(generateRPCGlobalMaxRequest(wrongLoc, cap, file + "4")); + // + // checkWrongCases(wrongOnes); + // } + // + // /** + // * It tests the parsing of RPCTruncate requests + // */ + // public void testTruncate() throws Exception { + // + // long size = 1; + // // Right use case + // { + // Request rq = new Request(generateRPCTruncateRequest(loc + // .getSummarized(), cap, file, size)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.X_LOC_CACHEMISS, rq.getStatus()); + // + // rq = new Request(generateRPCTruncateRequest(loc, cap, file, size)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // 
assertEquals(Request.Status.PARSED, rq.getStatus()); + // assertTrue(rq.getOperation() instanceof RPCTruncate); + // assertEquals(OperationType.RPC, rq.getOSDOperation().getOpType()); + // assertEquals(OperationSubType.TRUNCATE, rq.getOSDOperation() + // .getOpSubType()); + // assertEquals(file, rq.getFileId()); + // assertEquals(loc, rq.getLocations()); + // assertEquals(cap.toString(), rq.getCapability().toString()); + // assertEquals(loc.getLocation(osdId), rq.getLocation()); + // assertEquals(loc.getLocation(osdId).getStripingPolicy(), rq + // .getPolicy()); + // } + // + // // Wrong use cases + // List wrongOnes = new ArrayList(); + // wrongOnes.add(generateRPCTruncateRequest(null, cap, file + "2", size)); + // wrongOnes.add(generateRPCTruncateRequest(loc, null, file + "3", size)); + // wrongOnes.add(generateRPCTruncateRequest(loc, cap, null, size)); + // wrongOnes.add(generateRPCTruncateRequest(loc, cap, file + "4", null)); + // wrongOnes.add(generateRPCTruncateRequest(loc, cap, null, null)); + // wrongOnes.add(generateRPCTruncateRequest(loc, cap, file + "5", -size)); + // wrongOnes.add(generateRPCTruncateRequest(wrongLoc, cap, file + "6", + // size)); + // + // checkWrongCases(wrongOnes); + // } + // + // /** + // * It tests the parsing of RPCDeleteReplica requests + // */ + // public void testDeleteReplica() throws Exception { + // + // // Right use case + // { + // Request rq = new Request(generateRPCDeleteReplicaRequest(cap, file)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.PARSED, rq.getStatus()); + // assertTrue(rq.getOperation() instanceof RPCDeleteReplica); + // assertEquals(OperationType.RPC, rq.getOSDOperation().getOpType()); + // assertEquals(OperationSubType.DELETE_REPLICA, rq.getOSDOperation() + // .getOpSubType()); + // assertEquals(file, rq.getFileId()); + // assertEquals(cap.toString(), rq.getCapability().toString()); + // } + // + // // Wrong use cases + // List wrongOnes = new 
ArrayList(); + // wrongOnes.add(generateRPCDeleteReplicaRequest(null, file + "2")); + // wrongOnes.add(generateRPCDeleteReplicaRequest(cap, null)); + // + // checkWrongCases(wrongOnes); + // } + // + // /** + // * It tests the parsing of RPCDeleteReplica requests + // */ + // public void testTruncateReplica() throws Exception { + // + // // Right use case + // long size = 1; + // { + // Request rq = new Request(generateRPCTruncateReplicaRequest(loc + // .getSummarized(), cap, file, size)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.X_LOC_CACHEMISS, rq.getStatus()); + // + // rq = new Request(generateRPCTruncateReplicaRequest(loc, cap, file, + // size)); + // + // stage.enqueueRequest(rq); + // waitParsedRequest(); + // + // assertEquals(Request.Status.PARSED, rq.getStatus()); + // assertTrue(rq.getOperation() instanceof RPCTruncateReplica); + // assertEquals(OperationType.RPC, rq.getOSDOperation().getOpType()); + // assertEquals(OperationSubType.TRUNCATE_REPLICA, rq + // .getOSDOperation().getOpSubType()); + // assertEquals(file, rq.getFileId()); + // assertEquals(cap.toString(), rq.getCapability().toString()); + // assertEquals(loc, rq.getLocations()); + // assertEquals(loc.getLocation(osdId), rq.getLocation()); + // assertEquals(loc.getLocation(osdId).getStripingPolicy(), rq + // .getPolicy()); + // } + // + // // Wrong use cases + // List wrongOnes = new ArrayList(); + // wrongOnes.add(generateRPCTruncateReplicaRequest(null, cap, file + "2", + // size)); + // wrongOnes.add(generateRPCTruncateReplicaRequest(loc, null, file + "3", + // size)); + // wrongOnes.add(generateRPCTruncateReplicaRequest(loc, cap, null, size)); + // wrongOnes.add(generateRPCTruncateReplicaRequest(loc, cap, file + "4", + // null)); + // wrongOnes.add(generateRPCTruncateReplicaRequest(loc, cap, null, null)); + // wrongOnes.add(generateRPCTruncateReplicaRequest(loc, cap, file + "5", + // -size)); + // 
wrongOnes.add(generateRPCTruncateReplicaRequest(wrongLoc, cap, file + // + "6", size)); + // + // checkWrongCases(wrongOnes); + // } + // + + private PinkyRequest generateGetRequest(Locations loc, Capability cap, String file) + throws JSONException { + HTTPHeaders headers = new HTTPHeaders(); + if (cap != null) + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + if (loc != null) + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString()); + String uri = null; + if (file != null) + uri = file; + + return new PinkyRequest(HTTPUtils.GET_TOKEN, uri, null, headers); + } + + // + // private PinkyRequest generateReadRangeRequest(Locations loc, + // Capability cap, String file, long objectNumber, long[] range) + // throws JSONException { + // HTTPHeaders headers = new HTTPHeaders(); + // if (cap != null) + // headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + // if (loc != null) + // headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString() + // .asString()); + // headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long + // .toString(objectNumber)); + // headers.addHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes " + // + range[0] + "-" + range[1] + "/*")); + // String uri = null; + // if (file != null) + // uri = file; + // + // return new PinkyRequest(HTTPUtils.GET_TOKEN, uri, null, headers); + // } + // + // private PinkyRequest generateReadWholeRequest(Locations loc, + // Capability cap, String file, long objectNumber) throws JSONException { + // HTTPHeaders headers = new HTTPHeaders(); + // if (cap != null) + // headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + // if (loc != null) + // headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString() + // .asString()); + // headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long + // .toString(objectNumber)); + // String uri = null; + // if (file != null) + // uri = file; + // + // return new PinkyRequest(HTTPUtils.GET_TOKEN, uri, null, 
headers); + // } + // + // private PinkyRequest generateWriteRangeRequest(Locations loc, + // Capability cap, String file, Long objectNumber, long[] range, + // byte[] data) throws JSONException { + // HTTPHeaders headers = new HTTPHeaders(); + // if (cap != null) + // headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + // if (loc != null) + // headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString() + // .asString()); + // if (objectNumber != null) + // headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, objectNumber + // .toString()); + // headers.addHeader(HTTPHeaders.HDR_CONTENT_RANGE, new String("bytes " + // + range[0] + "-" + range[1] + "/*")); + // + // ReusableBuffer rb = null; + // if (data != null) { + // headers.addHeader(HTTPHeaders.HDR_CONTENT_LENGTH, Long + // .toString(data.length)); + // rb = ReusableBuffer.wrap(data); + // } + // + // String uri = null; + // if (file != null) + // uri = file; + // + // return new PinkyRequest(HTTPUtils.PUT_TOKEN, uri, null, headers, rb); + // } + // + // private PinkyRequest generateWriteWholeRequest(Locations loc, + // Capability cap, String file, Long objectNumber, byte[] data) + // throws JSONException { + // HTTPHeaders headers = new HTTPHeaders(); + // if (cap != null) + // headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + // if (loc != null) + // headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString() + // .asString()); + // if (objectNumber != null) + // headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, objectNumber + // .toString()); + // + // ReusableBuffer rb = null; + // if (data != null) { + // headers.addHeader(HTTPHeaders.HDR_CONTENT_LENGTH, Long + // .toString(data.length)); + // rb = ReusableBuffer.wrap(data); + // } + // + // String uri = null; + // if (file != null) + // uri = file; + // + // return new PinkyRequest(HTTPUtils.PUT_TOKEN, uri, null, headers, rb); + // } + // + // private PinkyRequest generateDeleteWholeRequest(Locations loc, + // 
Capability cap, String file) throws JSONException { + // HTTPHeaders headers = new HTTPHeaders(); + // if (cap != null) + // headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + // if (loc != null) + // headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString() + // .asString()); + // + // String uri = null; + // if (file != null) + // uri = file; + // + // return new PinkyRequest(HTTPUtils.DELETE_TOKEN, uri, null, headers); + // } + // + // private PinkyRequest generateRPCDeleteReplicaRequest(Capability cap, + // String file) { + // HTTPHeaders headers = new HTTPHeaders(); + // if (cap != null) + // headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + // if (file != null) + // headers.addHeader(HTTPHeaders.HDR_XFILEID, file); + // + // return new PinkyRequest(HTTPUtils.POST_TOKEN, + // RPCTokens.deleteLocalTOKEN, null, headers); + // } + // + // private PinkyRequest generateRPCGlobalMaxRequest(Locations loc, + // Capability cap, String file) throws JSONException { + // HTTPHeaders headers = new HTTPHeaders(); + // if (cap != null) + // headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + // if (loc != null) + // headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString() + // .asString()); + // if (file != null) + // headers.addHeader(HTTPHeaders.HDR_XFILEID, file); + // + // return new PinkyRequest(HTTPUtils.POST_TOKEN, + // RPCTokens.fetchGlobalMaxToken, null, headers, null); + // } + // + // private PinkyRequest generateRPCTruncateRequest(Locations loc, + // Capability cap, String file, Long finalSize) throws JSONException { + // HTTPHeaders headers = new HTTPHeaders(); + // if (cap != null) + // headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + // if (loc != null) + // headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString() + // .asString()); + // + // byte[] data = null; + // if ((file != null) && (finalSize != null)) { + // data = JSONParser.toJSON(file, finalSize).getBytes( + // 
HTTPUtils.ENC_UTF8); + // } else if (file != null) { + // data = JSONParser.toJSON(file).getBytes(HTTPUtils.ENC_UTF8); + // } else if (finalSize != null) { + // data = JSONParser.toJSON(finalSize).getBytes(HTTPUtils.ENC_UTF8); + // } + // + // ReusableBuffer rb = null; + // if (data != null) { + // rb = ReusableBuffer.wrap(data); + // headers.addHeader(HTTPHeaders.HDR_CONTENT_LENGTH, Long + // .toString(data.length)); + // } + // + // return new PinkyRequest(HTTPUtils.POST_TOKEN, RPCTokens.truncateTOKEN, + // null, headers, rb); + // } + // + // private PinkyRequest generateRPCTruncateReplicaRequest(Locations loc, + // Capability cap, String file, Long finalSize) throws JSONException { + // HTTPHeaders headers = new HTTPHeaders(); + // if (cap != null) + // headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + // if (loc != null) + // headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString() + // .asString()); + // + // byte[] data = null; + // if ((file != null) && (finalSize != null)) { + // data = JSONParser.toJSON(file, finalSize).getBytes( + // HTTPUtils.ENC_UTF8); + // } else if (file != null) { + // data = JSONParser.toJSON(file).getBytes(HTTPUtils.ENC_UTF8); + // } else if (finalSize != null) { + // data = JSONParser.toJSON(finalSize).getBytes(HTTPUtils.ENC_UTF8); + // } + // + // ReusableBuffer rb = null; + // if (data != null) { + // rb = ReusableBuffer.wrap(data); + // headers.addHeader(HTTPHeaders.HDR_CONTENT_LENGTH, Long + // .toString(data.length)); + // } + // + // return new PinkyRequest(HTTPUtils.POST_TOKEN, + // RPCTokens.truncateLocalTOKEN, null, headers, rb); + // } + // + + private void checkFailureCases(List cases) throws Exception { + + for (PinkyRequest pr : cases) { + + OSDRequest rq = new OSDRequest(0); + rq.setPinkyRequest(pr); + + finished = false; + stage.enqueueOperation(rq, ParserStage.STAGEOP_PARSE, null); + synchronized (this) { + while (!finished) + wait(); + } + + assertNotNull(rq.getError()); + } + } + + public 
static void main(String[] args) { + TestRunner.run(ParserStageTest.class); + } + + private class TestRequestDispatcher implements RequestDispatcher { + + private class TestOp extends Operation { + + public TestOp(RequestDispatcher master) { + super(master); + } + + @Override + public void startRequest(OSDRequest rq) { + master.requestFinished(rq); + } + + } + + private OSDConfig config; + + public TestRequestDispatcher(OSDConfig config) throws IOException { + this.config = config; + } + + public OSDConfig getConfig() { + return config; + } + + public Operation getOperation(RequestDispatcher.Operations opCode) { + return new TestOp(this); + } + + public Stage getStage(Stages stage) { + return null; + } + + public StageStatistics getStatistics() { + return new StageStatistics(); + } + + public boolean isHeadOSD(Location xloc) { + return false; + } + + public void requestFinished(OSDRequest rq) { + synchronized (ParserStageTest.this) { + finished = true; + ParserStageTest.this.notify(); + } + } + + @Override + public void sendSpeedyRequest(Request originalRequest, + SpeedyRequest speedyRq, InetSocketAddress server) + throws IOException { + // TODO Auto-generated method stub + + } + + public void sendUDP(ReusableBuffer data, InetSocketAddress receiver) { + } + + public void shutdown() { + } + + @Override + public DIRClient getDIRClient() { + // TODO Auto-generated method stub + return null; + } + } + +} diff --git a/servers/test/org/xtreemfs/test/osd/StorageLayoutTest.java b/servers/test/org/xtreemfs/test/osd/StorageLayoutTest.java new file mode 100644 index 0000000000000000000000000000000000000000..3bb565b3cc0a5c4b0601425a6cd9ab45a188fc0e --- /dev/null +++ b/servers/test/org/xtreemfs/test/osd/StorageLayoutTest.java @@ -0,0 +1,118 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de Supercomputacion and + Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. 
XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), + * Eugenio Cesario (CNR) + */ + +package org.xtreemfs.test.osd; + +import java.io.File; +import java.io.IOException; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.RAID0; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.osd.OSDConfig; +import org.xtreemfs.osd.storage.HashStorageLayout; +import org.xtreemfs.osd.storage.MetadataCache; +import org.xtreemfs.osd.storage.SimpleStorageLayout; +import org.xtreemfs.test.SetupUtils; + +/** + * + * @author bjko + */ +public class StorageLayoutTest extends TestCase { + + final OSDConfig config; + + public StorageLayoutTest(String testName) throws IOException { + super(testName); + Logging.start(SetupUtils.DEBUG_LEVEL); + config = SetupUtils.createOSD1Config(); + } + + protected void setUp() throws Exception { + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + FSUtils.delTree(new File(config.getObjDir())); + } + + protected void tearDown() throws Exception { + } + + public void testLayout() throws Exception { + + final String fileId = "ABCDEFG:0001"; + + SimpleStorageLayout layout = new SimpleStorageLayout(config, new MetadataCache()); + HashStorageLayout layout2 = new HashStorageLayout(config, new MetadataCache()); + + StripingPolicy sp = new RAID0(64, 1); + + assertFalse(layout.fileExists(fileId)); + assertFalse(layout2.fileExists(fileId)); + + ReusableBuffer data = BufferPool.allocate(64); + for (int i = 0; i < 64; i++) { + data.put((byte) (48 + i)); + } + + layout.writeObject(fileId, 0l, data, 1, 0, null, sp, 0l); + layout2.writeObject(fileId, 0l, data, 1, 0, null, sp, 0l); + BufferPool.free(data); + + data = layout.readObject(fileId, 0l, 1, null, sp, 0l); + assertEquals(64, data.capacity()); + for (int i = 0; i < 64; i++) { + assertEquals((byte) (48 + i), data.get()); + } + BufferPool.free(data); + + data = layout2.readObject(fileId, 0l, 1, null, sp, 0l); + assertEquals(64, data.capacity()); + for (int i = 0; i < 64; i++) { + assertEquals((byte) (48 + i), data.get()); + } + BufferPool.free(data); + + data = layout.readObject(fileId, 1l, 1, null, sp, 0l); + assertEquals(0, data.capacity()); + BufferPool.free(data); + + data = layout2.readObject(fileId, 1l, 1, null, sp, 0l); + assertEquals(0, data.capacity()); + BufferPool.free(data); + } + + public static void main(String[] args) { + TestRunner.run(StorageLayoutTest.class); + } + +} diff --git a/servers/test/org/xtreemfs/test/osd/StorageStageTest.java b/servers/test/org/xtreemfs/test/osd/StorageStageTest.java new file mode 100644 index 0000000000000000000000000000000000000000..5ec3a1f3656a69d629fc52aac1a5105344ecd0cd --- /dev/null +++ b/servers/test/org/xtreemfs/test/osd/StorageStageTest.java @@ -0,0 +1,505 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin, + Barcelona Supercomputing Center - Centro Nacional de 
Supercomputacion and + Consiglio Nazionale delle Ricerche. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), + * Eugenio Cesario (CNR) + */ + +package org.xtreemfs.test.osd; + +import junit.framework.TestCase; + +// TODO +public class StorageStageTest extends TestCase { + + public void testDummy() { + + } + +// /* +// * needed for checking the results +// */ +// private class TestRequestController implements RequestHandler, UDPCom { +// +// private OSDRequest lastRequest = null; +// +// public TestRequestController(OSDId me) throws IOException { +// } +// +// public OSDId getMe() { +// return OSDID; +// } +// +// /** +// * blocks untill a Request is received +// * +// * @return last received Request +// */ +// public synchronized OSDRequest getLastRequest(long timeout) { +// if (lastRequest == null) { +// try { +// wait(timeout); +// } catch (InterruptedException ex) { +// ex.printStackTrace(); +// } +// } +// OSDRequest ret = lastRequest; +// lastRequest = null; +// return ret; +// } +// +// public synchronized void stageCallback(OSDRequest request) { +// lastRequest = request; +// 
BufferPool.free(request.getData()); +// notify(); +// } +// +// public void sendUDP(ReusableBuffer data, InetSocketAddress receiver) { +// throw new UnsupportedOperationException("Not supported yet."); +// } +// +// public void receiveUDP(ReusableBuffer data, InetSocketAddress sender) { +// throw new UnsupportedOperationException("Not supported yet."); +// } +// +// public void sendInternalEvent(OSDRequest event) { +// } +// } +// +// final OSDId OSDID = new OSDId("localhost", 32636, +// OSDId.SCHEME_HTTP); +// +// StorageStage stage; +// +// TestRequestController controller; +// +// File dbDir; +// +// OSDConfig config; +// +// StripingPolicy sp; +// +// Location loc; +// +// MultiSpeedy speedy; +// +// public StorageStageTest(String testName) throws IOException { +// super(testName); +// Logging.start(SetupUtils.DEBUG_LEVEL); +// +// config = SetupUtils.createOSD1ConfigForceWithoutSSL(); +// controller = new TestRequestController(OSDID); +// stage = null; +// +// dbDir = new File(config.getObjDir()); +// FSTools.delTree(dbDir); +// +// sp = new RAID0(1, 1); +// List osd = new ArrayList(); +// osd.add(OSDID); +// loc = new Location(sp, osd); +// } +// +// protected void setUp() throws Exception { +// System.out.println("TEST: " + getClass().getSimpleName() + "." +// + getName()); +// speedy = new MultiSpeedy(); +// stage = new StorageStage(controller, controller, config, speedy); +// stage.start(); +// } +// +// protected void tearDown() throws Exception { +// FSTools.delTree(dbDir); +// stage.shutdown(); +// speedy.shutdown(); +// stage.waitForShutdown(); +// speedy.waitForShutdown(); +// stage = null; +// } +// +// /* +// * Tests +// */ +// +// // TODO: write better assert +// public void testFileWrite() { +// for (int i = 0; i < 10; i++) { +// System.out.println("TEST: " + getClass().getSimpleName() + "." 
+// + getName() + ":" + i); +// +// OSDRequest rq = createWriteRequest(1000); +// stage.enqueueRequest(rq); +// OSDRequest rqR = controller.getLastRequest(OSDClient.TIMEOUT); +// +// assertEquals(OSDRequest.Status.PERSISTED, rqR.getStatus()); +// } +// } +// +// // TODO: write better assert +// public void testFileRead() { +// for (int i = 0; i < 10; i++) { +// System.out.println("TEST: " + getClass().getSimpleName() + "." +// + getName() + ":" + i); +// +// OSDRequest rq1 = createWriteRequest(10); +// stage.enqueueRequest(rq1); +// OSDRequest rq1R = controller.getLastRequest(OSDClient.TIMEOUT); +// +// OSDRequest rq2 = createReadRequest(rq1.getFileId(), rq1 +// .getObjectNo()); +// stage.enqueueRequest(rq2); +// OSDRequest rq2R = controller.getLastRequest(OSDClient.TIMEOUT); +// +// assertEquals(OSDRequest.Status.PERSISTED, rq2R.getStatus()); +// } +// } +// +// // TODO: write better assert +// public void testFileDelete() { +// for (int i = 0; i < 10; i++) { +// System.out.println("TEST: " + getClass().getSimpleName() + "." 
+// + getName() + ":" + i); +// +// OSDRequest rq1 = createWriteRequest(10); +// stage.enqueueRequest(rq1); +// OSDRequest rq1R = controller.getLastRequest(OSDClient.TIMEOUT); +// +// OSDRequest rq2 = createDeleteRequest(rq1.getFileId(), rq1 +// .getObjectNo()); +// stage.enqueueRequest(rq2); +// OSDRequest rq2R = controller.getLastRequest(OSDClient.TIMEOUT); +// +// assertEquals(OSDRequest.Status.PERSISTED, rq2R.getStatus()); +// } +// } +// +// /** +// * setup a WriteRequest +// */ +// private OSDRequest createWriteRequest(int dataLength) +// throws IllegalArgumentException { +// OSDRequest rq = new OSDRequest(new PinkyRequest()); +// // set the needed parameters +// OSDOperation op = new OSDOperation(OperationType.WRITE, +// OperationSubType.WHOLE); +// rq.setOSDOperation(op); +// String id = generateFileId(); +// rq.setFileId(id); +// rq.setObjectNo(1); +// rq.setPolicy(sp); +// rq.setLocation(loc); +// rq.setCapability(new Capability(id, "write", "IAmTheClient", 0)); +// +// byte[] bytes = generateRandomBytes(dataLength); +// ReusableBuffer buf = ReusableBuffer.wrap(bytes); +// rq.getRequest().requestBody = buf; +// rq.getRequest().requestBdyLength = buf.capacity(); +// return rq; +// } +// +// /** +// * setup a ReadRequest +// */ +// private OSDRequest createReadRequest(String fileId, long objNo) +// throws IllegalArgumentException { +// OSDRequest rq = new OSDRequest(new PinkyRequest()); +// // set the needed parameters +// OSDOperation op = new OSDOperation(OperationType.READ, +// OperationSubType.WHOLE); +// rq.setOSDOperation(op); +// +// rq.setFileId(fileId); +// rq.setObjectNo(objNo); +// rq.setPolicy(sp); +// rq.setLocation(loc); +// rq.setCapability(new Capability(fileId, "read", "IAmTheClient", 0)); +// +// return rq; +// } +// +// /** +// * setup a ReadRequest +// */ +// private OSDRequest createDeleteRequest(String fileId, long objNo) +// throws IllegalArgumentException { +// OSDRequest rq = new OSDRequest(new PinkyRequest()); +// +// // set the 
needed parameters +// OSDOperation op = new OSDOperation(OperationType.DELETE, +// OperationSubType.WHOLE); +// rq.setOSDOperation(op); +// +// rq.setLocation(loc); +// rq.setFileId(fileId); +// rq.setObjectNo(objNo); +// rq.setPolicy(sp); +// rq.setCapability(new Capability(fileId, "delete", "IAmTheClient", 0)); +// +// return rq; +// } +// +// /** +// * generates randomly filled byte-array +// * +// * @param length +// * length of the byte-array +// */ +// private byte[] generateRandomBytes(int length) { +// Random r = new Random(); +// byte[] bytes = new byte[length]; +// +// r.nextBytes(bytes); +// return bytes; +// } +// +// /** +// * generates randomly Filename +// */ +// private String generateFileId() throws IllegalArgumentException { +// Random r = new Random(); +// String id = new String(r.nextInt(10) + ":" + r.nextInt(1000000)); +// +// return id; +// } +// +// public static void main(String[] args) { +// TestRunner.run(StorageStageTest.class); +// } +//} +// +// +//class MultiThreadedStorageStageTest extends TestCase { +// +// /* +// * needed for checking the results +// */ +// private class TestRequestController implements RequestHandler, UDPCom { +// private OSDId me; +// private OSDRequest lastRequest = null; +// private int num_requests_processed; +// +// public TestRequestController(OSDId me) throws IOException { +// this.me = me; +// num_requests_processed = 0; +// } +// +// public OSDId getMe() { +// return OSDID; +// } +// +// /** +// * blocks until a Request is received +// * @return last received Request +// */ +// public synchronized OSDRequest getLastRequest(long timeout) { +// if(lastRequest == null) { +// try { +// wait(timeout); +// } +// catch (InterruptedException ex) { +// ex.printStackTrace(); +// } +// } +// OSDRequest ret = lastRequest; +// lastRequest = null; +// return ret; +// } +// +// public synchronized void stageCallback(OSDRequest request) { +// //lastRequest = request; +// //System.out.println("++++" + 
num_requests_processed + ": stage callback: request " + request.getFileId()); +// num_requests_processed++; +// BufferPool.free(request.getData()); +// //notify(); +// } +// +// public void sendUDP(ReusableBuffer data, InetSocketAddress receiver) { +// throw new UnsupportedOperationException("Not supported yet."); +// } +// +// public void receiveUDP(ReusableBuffer data, InetSocketAddress sender) { +// throw new UnsupportedOperationException("Not supported yet."); +// } +// +// public void sendInternalEvent(OSDRequest event) { +// } +// } +// +// final OSDId OSDID = new OSDId("localhost",32636, OSDId.SCHEME_HTTP); +// +// MultithreadedStorageStage stage; +// TestRequestController controller; +// File dbDir; +// OSDConfig config; +// +// StripingPolicy sp; +// Location loc; +// +// MultiSpeedy speedy; +// +// +// public MultiThreadedStorageStageTest(String testName) throws IOException { +// super(testName); +// Logging.start(SetupUtils.DEBUG_LEVEL); +// +// config = SetupUtils.createOSD1ConfigForceWithoutSSL(); +// controller = new TestRequestController(OSDID); +// stage = null; +// +// dbDir = new File(config.getObjDir()); +// FSTools.delTree(dbDir); +// +// sp = new RAID0(1,1); +// List osd = new ArrayList(); +// osd.add(OSDID); +// loc = new Location(sp,osd); +// } +// +// protected void setUp() throws Exception { +// System.out.println("TEST: " + getClass().getSimpleName() + "." + getName()); +// speedy = new MultiSpeedy(); +// stage = new MultithreadedStorageStage(controller, controller, config, speedy,10); +// stage.start(); +// } +// +// protected void tearDown() throws Exception { +// FSTools.delTree(dbDir); +// stage.shutdown(); +// speedy.shutdown(); +// stage.waitForShutdown(); +// speedy.waitForShutdown(); +// stage = null; +// } +// +// /* +// * Tests +// */ +// +// // TODO: write better assert +// public void testMultipleRequests() throws InterruptedException { +// int numRequests = 10; +// for(int i=0; i for more details. 
+ The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Jan Stender (ZIB) + */ + +package org.xtreemfs.test.osd; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.StringTokenizer; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.RPCResponse; +import org.xtreemfs.common.clients.RPCResponseListener; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.clients.osd.OSDClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.common.striping.RAID0; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.dir.DIRConfig; +import org.xtreemfs.dir.RequestController; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.osd.OSD; +import org.xtreemfs.osd.OSDConfig; +import org.xtreemfs.test.SetupUtils; + +public class StripingTest extends 
TestCase { + + static class MRCDummy implements RPCResponseListener { + + private long issuedEpoch; + + private long epoch; + + private long fileSize; + + private String capSecret; + + public MRCDummy(String capSecret) { + this.capSecret = capSecret; + } + + Capability open(char mode) { + if (mode == 't') + issuedEpoch++; + + return new Capability(FILE_ID, "DebugCapability", issuedEpoch, capSecret); + } + + synchronized long getFileSize() { + return fileSize; + } + + public synchronized void responseAvailable(RPCResponse response) { + + try { + + String newFileSizeString = response.getSpeedyRequest().responseHeaders + .getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + + if (newFileSizeString != null) { + + StringTokenizer st = new StringTokenizer(newFileSizeString, "[],"); + long newFileSize = Long.parseLong(st.nextToken()); + long epochNo = Long.parseLong(st.nextToken()); + + if (epochNo < epoch) + return; + + if (epochNo > epoch || newFileSize > fileSize) { + epoch = epochNo; + fileSize = newFileSize; + } + } + + } catch (Exception exc) { + exc.printStackTrace(); + System.exit(1); + } + } + + } + + private static final String FILE_ID = "1:1"; + + private static final int KB = 1; + + private static final int SIZE = KB * 1024; + + private static final byte[] ZEROS_HALF = new byte[SIZE / 2]; + + private static final byte[] ZEROS = new byte[SIZE]; + + private final Capability cap; + + private final DIRConfig dirConfig; + + private final OSDConfig osdCfg1; + + private final OSDConfig osdCfg2; + + private final OSDConfig osdCfg3; + + private final String capSecret; + + private List osdServer; + + private List osdIDs; + + private OSDClient client; + + private Locations loc; + + private StripingPolicy sp; + + private RequestController dir; + + private DIRClient dirClient; + + /** Creates a new instance of StripingTest */ + public StripingTest(String testName) throws IOException { + super(testName); + Logging.start(SetupUtils.DEBUG_LEVEL); + + osdCfg1 = 
SetupUtils.createOSD1Config(); + osdCfg2 = SetupUtils.createOSD2Config(); + osdCfg3 = SetupUtils.createOSD3Config(); + + capSecret = osdCfg1.getCapabilitySecret(); + cap = new Capability(FILE_ID, "DebugCapability", 0, capSecret); + + sp = new RAID0(KB, 3); + dirConfig = SetupUtils.createDIRConfig(); + } + + protected void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." + getName()); + + FSUtils.delTree(new File(SetupUtils.TEST_DIR)); + + dir = new RequestController(dirConfig); + dir.startup(); + + dirClient = SetupUtils.initTimeSync(); + + osdIDs = new ArrayList(3); + osdIDs.add(SetupUtils.getOSD1UUID()); + osdIDs.add(SetupUtils.getOSD2UUID()); + osdIDs.add(SetupUtils.getOSD3UUID()); + + osdServer = new ArrayList(3); + osdServer.add(new OSD(osdCfg1)); + osdServer.add(new OSD(osdCfg2)); + osdServer.add(new OSD(osdCfg3)); + + client = SetupUtils.createOSDClient(10000); + + List locations = new ArrayList(1); + + List osd = new ArrayList(3); + for (ServiceUUID oid : osdIDs) + osd.add(oid); + locations.add(new Location(sp, osd)); + loc = new Locations(locations); + + SetupUtils.setupLocalResolver(); + } + + protected void tearDown() throws Exception { + + client.shutdown(); + client.waitForShutdown(); + + if (dirClient != null) { + dirClient.shutdown(); + dirClient.waitForShutdown(); + } + + osdServer.get(0).shutdown(); + osdServer.get(1).shutdown(); + osdServer.get(2).shutdown(); + dir.shutdown(); + } + + /* TODO: test delete/truncate epochs! 
*/ + + /** + * tests reading and writing of striped files + */ + public void testPUTandGET() throws Exception { + + final int numObjs = 5; + final int[] testSizes = { 1, 2, SIZE - 1, SIZE }; + + for (int ts : testSizes) { + + byte[] data = generateData(ts); + String file = "1:1" + ts; + + for (int i = 0, osdIndex = 0; i < numObjs; i++, osdIndex = i % osdIDs.size()) { + + // write an object with the given test size + RPCResponse resp = client.put(osdIDs.get(osdIndex).getAddress(), loc, cap, file, i, + ReusableBuffer.wrap(data)); + resp.waitForResponse(); + + String fileSizeHeader = resp.getHeaders().getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + String fileSizeString = fileSizeHeader.substring(1, fileSizeHeader.indexOf(',')); + assertEquals(i * SIZE + ts, Integer.parseInt(fileSizeString)); + resp.freeBuffers(); + + // read and check the previously written object + resp = client.get(osdIDs.get(osdIndex).getAddress(), loc, cap, file, i); + assertEquals(HTTPUtils.SC_OKAY, resp.getStatusCode()); + checkResponse(data, resp); + resp.freeBuffers(); + } + } + } + + public void testIntermediateHoles() throws Exception { + + final byte[] data = generateData(3); + + // write the nineth object, check the file size + int obj = 8; + RPCResponse response = client.put(osdIDs.get(obj % osdIDs.size()).getAddress(), loc, cap, + FILE_ID, obj, ReusableBuffer.wrap(data)); + assertEquals("[" + (obj * SIZE + data.length) + ",0]", response.getHeaders().getHeader( + HTTPHeaders.HDR_XNEWFILESIZE)); + response.freeBuffers(); + + // write the fifth object, check the file size + obj = 5; + response = client.put(osdIDs.get(obj % osdIDs.size()).getAddress(), loc, cap, FILE_ID, obj, + ReusableBuffer.wrap(data)); + + // file size header may be either null or 4 * size + data.length, + // depending on whether the globalmax message was received already + String xNewFileSize = response.getHeaders().getHeader(HTTPHeaders.HDR_XNEWFILESIZE); + assertTrue(xNewFileSize == null || xNewFileSize.equals((obj * SIZE 
+ data.length) + "")); + response.freeBuffers(); + + // check whether the first object consists of zeros + obj = 0; + response = client.get(osdIDs.get(obj % osdIDs.size()).getAddress(), loc, cap, FILE_ID, obj); + checkResponse(ZEROS, response); + response.freeBuffers(); + + // write the first object, check the file size header (must be null) + response = client.put(osdIDs.get(obj % osdIDs.size()).getAddress(), loc, cap, FILE_ID, obj, + ReusableBuffer.wrap(data)); + assertNull(response.getHeaders().getHeader(HTTPHeaders.HDR_XNEWFILESIZE)); + response.freeBuffers(); + } + + public void testWriteExtend() throws Exception { + + final byte[] data = generateData(3); + final byte[] paddedData = new byte[SIZE]; + System.arraycopy(data, 0, paddedData, 0, data.length); + + // write first object + RPCResponse response = client.put(osdIDs.get(0).getAddress(), loc, cap, FILE_ID, 0, + ReusableBuffer.wrap(data)); + response.waitForResponse(); + response.freeBuffers(); + + // write second object + response = client.put(osdIDs.get(1).getAddress(), loc, cap, FILE_ID, 1, ReusableBuffer + .wrap(data)); + response.waitForResponse(); + response.freeBuffers(); + + // read first object + response = client.get(osdIDs.get(0).getAddress(), loc, cap, FILE_ID, 0); + response.waitForResponse(); + response.freeBuffers(); + + // check whether the first object consists of zeros, except for the + // first character + response = client.get(osdIDs.get(0).getAddress(), loc, cap, FILE_ID, 0); + checkResponse(paddedData, response); + response.freeBuffers(); + } + + /** + * tests the truncation of striped files + */ + public void testTruncate() throws Exception { + + byte[] data = generateData(SIZE); + + // ------------------------------- + // create a file with five objects + // ------------------------------- + for (int i = 0, osdIndex = 0; i < 5; i++, osdIndex = i % osdIDs.size()) { + + RPCResponse tmp = client.put(osdIDs.get(osdIndex).getAddress(), loc, cap, FILE_ID, i, + ReusableBuffer.wrap(data)); 
+ tmp.waitForResponse(); + tmp.freeBuffers(); + } + + // ---------------------------------------------- + // shrink the file to a length of one full object + // ---------------------------------------------- + + Capability truncateCap1 = new Capability(FILE_ID, "DebugCapability", 1, capSecret); + + RPCResponse resp = client.truncate(osdIDs.get(0).getAddress(), loc, truncateCap1, FILE_ID, + SIZE); + resp.waitForResponse(); + resp.freeBuffers(); + + // check whether all objects have the expected content + for (int i = 0, osdIndex = 0; i < 5; i++, osdIndex = i % osdIDs.size()) { + + // try to read the object + resp = client.get(osdIDs.get(osdIndex).getAddress(), loc, cap, FILE_ID, i); + + // the first object must exist, all other ones must have been + // deleted + if (i == 0) + checkResponse(data, resp); + else + checkResponse(null, resp); + + resp.freeBuffers(); + } + + // ------------------------------------------------- + // extend the file to a length of eight full objects + // ------------------------------------------------- + Capability truncateCap2 = new Capability(FILE_ID, "DebugCapability", 2, capSecret); + + resp = client.truncate(osdIDs.get(0).getAddress(), loc, truncateCap2, FILE_ID, SIZE * 8); + resp.waitForResponse(); + resp.freeBuffers(); + + // check whether all objects have the expected content + for (int i = 0, osdIndex = 0; i < 8; i++, osdIndex = i % osdIDs.size()) { + + // try to read the object + resp = client.get(osdIDs.get(osdIndex).getAddress(), loc, cap, FILE_ID, i); + + // the first object must contain data, all other ones must contain + // zeros + if (i == 0) + checkResponse(data, resp); + else + checkResponse(ZEROS, resp); + + resp.freeBuffers(); + } + + // ------------------------------------------ + // shrink the file to a length of 3.5 objects + // ------------------------------------------ + Capability truncateCap3 = new Capability(FILE_ID, "DebugCapability", 3, capSecret); + + resp = client.truncate(osdIDs.get(0).getAddress(), loc, 
truncateCap3, FILE_ID, + (long) (SIZE * 3.5f)); + resp.waitForResponse(); + resp.freeBuffers(); + + // check whether all objects have the expected content + for (int i = 0, osdIndex = 0; i < 5; i++, osdIndex = i % osdIDs.size()) { + + // try to read the object + resp = client.get(osdIDs.get(osdIndex).getAddress(), loc, cap, FILE_ID, i); + + // the first object must contain data, all other ones must contain + // zeros, where the last one must only be half an object size + if (i == 0) + checkResponse(data, resp); + else if (i == 3) + checkResponse(ZEROS_HALF, resp); + else if (i >= 4) + checkResponse(null, resp); + else + checkResponse(ZEROS, resp); + + resp.freeBuffers(); + } + + // -------------------------------------------------- + // truncate the file to the same length it had before + // -------------------------------------------------- + Capability truncateCap4 = new Capability(FILE_ID, "DebugCapability", 4, capSecret); + + resp = client.truncate(osdIDs.get(0).getAddress(), loc, truncateCap4, FILE_ID, + (long) (SIZE * 3.5f)); + resp.waitForResponse(); + resp.freeBuffers(); + + // check whether all objects have the expected content + for (int i = 0, osdIndex = 0; i < 5; i++, osdIndex = i % osdIDs.size()) { + + // try to read the object + resp = client.get(osdIDs.get(osdIndex).getAddress(), loc, cap, FILE_ID, i); + + // the first object must contain data, all other ones must contain + // zeros, where the last one must only be half an object size + if (i == 0) + checkResponse(data, resp); + else if (i == 3) + checkResponse(ZEROS_HALF, resp); + else if (i >= 4) + checkResponse(null, resp); + else + checkResponse(ZEROS, resp); + + resp.freeBuffers(); + } + + // -------------------------------- + // truncate the file to zero length + // -------------------------------- + Capability truncateCap5 = new Capability(FILE_ID, "DebugCapability", 5, capSecret); + + resp = client.truncate(osdIDs.get(0).getAddress(), loc, truncateCap5, FILE_ID, 0); + resp.waitForResponse(); 
+ resp.freeBuffers(); + + // check whether all objects have the expected content + for (int i = 0, osdIndex = 0; i < 5; i++, osdIndex = i % osdIDs.size()) { + + // try to read the object + resp = client.get(osdIDs.get(osdIndex).getAddress(), loc, cap, FILE_ID, i); + + // none of the objects must contain data + checkResponse(null, resp); + resp.freeBuffers(); + } + + data = generateData(5); + + // ---------------------------------- + // write new data to the first object + // ---------------------------------- + resp = client.put(osdIDs.get(0).getAddress(), loc, truncateCap5, FILE_ID, 0, ReusableBuffer + .wrap(data)); + resp.waitForResponse(); + resp.freeBuffers(); + + // ---------------------------------------------- + // extend the file to a length of one full object + // ---------------------------------------------- + Capability truncateCap6 = new Capability(FILE_ID, "DebugCapability", 6, capSecret); + + resp = client.truncate(osdIDs.get(0).getAddress(), loc, truncateCap6, FILE_ID, SIZE); + resp.waitForResponse(); + resp.freeBuffers(); + + // try to read the object + resp = client.get(osdIDs.get(0).getAddress(), loc, cap, FILE_ID, 0); + + // the object must contain data plus padding zeros + + final byte[] dataWithZeros = new byte[SIZE]; + System.arraycopy(data, 0, dataWithZeros, 0, data.length); + + checkResponse(dataWithZeros, resp); + resp.freeBuffers(); + + // --------------------------------------------- + // shrink the file to a length of half an object + // --------------------------------------------- + Capability truncateCap7 = new Capability(FILE_ID, "DebugCapability", 7, capSecret); + + resp = client.truncate(osdIDs.get(0).getAddress(), loc, truncateCap7, FILE_ID, SIZE / 2); + resp.waitForResponse(); + resp.freeBuffers(); + + // try to read the object + resp = client.get(osdIDs.get(0).getAddress(), loc, cap, FILE_ID, 0); + + // the object must contain data plus padding zeros + + final byte[] dataWithHalfZeros = new byte[SIZE / 2]; + 
System.arraycopy(data, 0, dataWithHalfZeros, 0, data.length); + + checkResponse(dataWithHalfZeros, resp); + resp.freeBuffers(); + } + + public void testInterleavedWriteAndTruncate() throws Exception { + + final int numIterations = 20; + final int maxObject = 20; + final int maxSize = maxObject * SIZE; + final int numWrittenObjs = 5; + + final MRCDummy mrcDummy = new MRCDummy(capSecret); + final List responses = new LinkedList(); + + for (int l = 0; l < numIterations; l++) { + + Capability cap = mrcDummy.open('w'); + + // randomly write 'numWrittenObjs' objects + for (int i = 0; i < numWrittenObjs; i++) { + + int objId = (int) (Math.random() * maxObject); + int osdIndex = objId % osdIDs.size(); + + // write an object with a random amount of bytes + int size = (int) ((SIZE - 1) * Math.random()) + 1; + RPCResponse resp = client.put(osdIDs.get(osdIndex).getAddress(), loc, cap, FILE_ID, + objId, ReusableBuffer.wrap(generateData(size))); + responses.add(resp); + + // update the file size when the response is received + resp.setResponseListener(mrcDummy); + } + + cap = mrcDummy.open('t'); + + // truncate the file + long newSize = (long) (Math.random() * maxSize); + RPCResponse resp = client.truncate(osdIDs.get(0).getAddress(), loc, cap, FILE_ID, + newSize); + resp.setResponseListener(mrcDummy); + resp.waitForResponse(); + resp.freeBuffers(); + + // wait until all write requests have been completed, i.e. 
all file + // size updates have been performed + for (RPCResponse r : responses) { + r.waitForResponse(); + r.freeBuffers(); + } + responses.clear(); + + long fileSize = mrcDummy.getFileSize(); + + // read the previously truncated objects, check size + for (int i = 0; i < maxObject; i++) { + resp = client.get(osdIDs.get(i % osdIDs.size()).getAddress(), loc, cap, FILE_ID, i); + resp.waitForResponse(); + + // check inner objects - should be full + if (i < fileSize / SIZE) + assertEquals(SIZE + "", resp.getHeaders().getHeader( + HTTPHeaders.HDR_CONTENT_LENGTH)); + + // check last object - should either be an EOF (null) or partial + // object + else if (i == fileSize / SIZE) { + if (fileSize % SIZE == 0) + assertEquals(null, resp.getBody()); + else + assertEquals((fileSize % SIZE) + "", resp.getHeaders().getHeader( + HTTPHeaders.HDR_CONTENT_LENGTH)); + } + + // check outer objects - should be EOF (null) + else + assertEquals(null, resp.getBody()); + + resp.freeBuffers(); + } + + } + + } + + /** + * tests the deletion of striped files + */ + public void testDELETE() throws Exception { + + final int numObjs = 5; + + byte[] data = generateData(SIZE); + + // create all objects + for (int i = 0, osdIndex = 0; i < numObjs; i++, osdIndex = i % osdIDs.size()) { + + RPCResponse tmp = client.put(osdIDs.get(osdIndex).getAddress(), loc, cap, FILE_ID, i, + ReusableBuffer.wrap(data)); + tmp.waitForResponse(); + tmp.freeBuffers(); + } + + Capability deleteCap = new Capability(FILE_ID, "DebugCapability", 1, capSecret); + + // delete the file + RPCResponse resp = client.delete(osdIDs.get(0).getAddress(), loc, deleteCap, FILE_ID); + resp.waitForResponse(); + resp.freeBuffers(); + } + + public static void main(String[] args) { + TestRunner.run(StripingTest.class); + } + + /** + * convenience method for generation of data + */ + private byte[] generateData(int numBytes) { + byte[] returnValue = new byte[numBytes]; + + for (int i = 0; i < returnValue.length; i++) + returnValue[i] = (byte) 
('0' + i % 2); + + return returnValue; + } + + /** + * Checks whether the data array received with the response is equal to the + * given one. + * + * @param data + * the data array + * @param response + * the response + * @throws Exception + */ + private void checkResponse(byte[] data, RPCResponse response) throws Exception { + + if (data == null) { + if (response.getBody() != null) + System.out.println("body (" + response.getBody().capacity() + "): " + + new String(response.getBody().array())); + assertNull(response.getBody()); + } + + else { + byte[] responseData = response.getBody().getData(); + assertEquals(data.length, responseData.length); + for (int i = 0; i < data.length; i++) + assertEquals(data[i], responseData[i]); + } + } + +} diff --git a/servers/test/org/xtreemfs/test/osd/replication/ReplicationStageTest.java b/servers/test/org/xtreemfs/test/osd/replication/ReplicationStageTest.java new file mode 100644 index 0000000000000000000000000000000000000000..5f3b59a57aefd8a1d88e4c5aff9dfdc30addf4d5 --- /dev/null +++ b/servers/test/org/xtreemfs/test/osd/replication/ReplicationStageTest.java @@ -0,0 +1,258 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . +*/ +/* + * AUTHORS: Christian Lorenz (ZIB) + */ +package org.xtreemfs.test.osd.replication; + + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; + +import junit.framework.TestCase; + +import org.junit.After; +import org.junit.Before; +import org.xtreemfs.common.Capability; +import org.xtreemfs.common.Request; +import org.xtreemfs.common.buffer.ReusableBuffer; +import org.xtreemfs.common.clients.dir.DIRClient; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.striping.Location; +import org.xtreemfs.common.striping.Locations; +import org.xtreemfs.common.striping.RAID0; +import org.xtreemfs.common.striping.StripingPolicy; +import org.xtreemfs.common.uuids.ServiceUUID; +import org.xtreemfs.foundation.json.JSONException; +import org.xtreemfs.foundation.pinky.HTTPHeaders; +import org.xtreemfs.foundation.pinky.HTTPUtils; +import org.xtreemfs.foundation.pinky.PinkyRequest; +import org.xtreemfs.foundation.pinky.SSLOptions; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.foundation.speedy.SpeedyRequest; +import org.xtreemfs.foundation.speedy.SpeedyResponseListener; +import org.xtreemfs.osd.OSDConfig; +import org.xtreemfs.osd.OSDRequest; +import org.xtreemfs.osd.RequestDispatcher; +import org.xtreemfs.osd.RequestDispatcher.Operations; +import org.xtreemfs.osd.RequestDispatcher.Stages; +import org.xtreemfs.osd.ops.Operation; +import org.xtreemfs.osd.ops.ReadOperation; +import org.xtreemfs.osd.ops.FetchAndWriteReplica; +import org.xtreemfs.osd.stages.AuthenticationStage; +import org.xtreemfs.osd.stages.ParserStage; +import org.xtreemfs.osd.stages.ReplicationStage; +import org.xtreemfs.osd.stages.Stage; +import org.xtreemfs.osd.stages.StageCallbackInterface; +import org.xtreemfs.osd.stages.StageStatistics; +import 
org.xtreemfs.osd.stages.StorageStage; +import org.xtreemfs.osd.stages.Stage.StageResponseCode; +import org.xtreemfs.osd.storage.MetadataCache; +import org.xtreemfs.osd.storage.StorageLayout; +import org.xtreemfs.osd.storage.Striping; +import org.xtreemfs.osd.storage.Striping.RPCMessage; +import org.xtreemfs.test.SetupUtils; +import org.xtreemfs.test.osd.ParserStageTest; + +/** + * + * 15.09.2008 + * @author clorenz + */ +public class ReplicationStageTest extends TestCase { + TestRequestDispatcher dispatcher; + private Capability capability; + private String file; + private List locationList; + + public ReplicationStageTest() { + super(); + // TODO Auto-generated constructor stub + } + + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + System.out.println("TEST: " + getClass().getSimpleName() + "." + + getName()); + Logging.start(SetupUtils.DEBUG_LEVEL); + + this.dispatcher = new TestRequestDispatcher(); + + file = "1:1"; + capability = new Capability(file, "read", 0, "IAmTheClient"); + locationList = new ArrayList(); + } + + /** + * @throws java.lang.Exception + */ + @After + public void tearDown() throws Exception { + dispatcher.shutdown(); + } + + public void testControlFlow() throws JSONException { + OSDRequest request = new OSDRequest(0); + + int stripeSize = 128; + + // add available osds + List osds = new ArrayList(); + osds.add(new ServiceUUID("UUID:localhost:33637")); + osds.add(new ServiceUUID("UUID:localhost:33638")); + osds.add(new ServiceUUID("UUID:localhost:33639")); + + List osds2 = new ArrayList(); + osds2.add(new ServiceUUID("UUID:localhost:33640")); + osds2.add(new ServiceUUID("UUID:localhost:33641")); + osds2.add(new ServiceUUID("UUID:localhost:33642")); + + locationList.add(new Location(new RAID0(stripeSize, osds.size()), osds)); + locationList.add(new Location(new RAID0(stripeSize, osds2.size()), osds2)); + + Locations locations = new Locations(locationList); + 
request.setPinkyRequest(generateGetRequest(locations, capability, file)); + + // start + request.setOperation(dispatcher.getOperation(Operations.READ)); + request.getOperation().startRequest(request); + + try { + Thread.sleep(10000); + } catch (InterruptedException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + + private PinkyRequest generateGetRequest(Locations loc, Capability cap, + String file) throws JSONException { + HTTPHeaders headers = new HTTPHeaders(); + if (cap != null) + headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString()); + if (loc != null) + headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString() + .asString()); + String uri = null; + if (file != null) + uri = file; + + return new PinkyRequest(HTTPUtils.GET_TOKEN, uri, null, headers); + } + + private class TestRequestDispatcher implements RequestDispatcher { + MultiSpeedy speedy = new TestMultiSpeedy(); + ReplicationStage stage; + DummyStage dummyStage; + + public TestRequestDispatcher() throws IOException { + stage = new ReplicationStage(this); + dummyStage = new DummyStage(); + stage.start(); + dummyStage.start(); + } + @Override + public Operation getOperation(RequestDispatcher.Operations opCode) { + switch(opCode){ + case READ: return new ReadOperation(this); + case FETCH_AND_WRITE_REPLICA: return new FetchAndWriteReplica(this); + } + return null; + } + @Override + public Stage getStage(Stages stage) { + switch(stage){ + case REPLICATION: return this.stage; + default: return this.dummyStage; + } + } + @Override + public StageStatistics getStatistics() { + return new StageStatistics(); + } + @Override + public boolean isHeadOSD(Location xloc) { + return false; + } + @Override + public void requestFinished(OSDRequest rq) { + // TODO + } + @Override + public void sendSpeedyRequest(Request originalRequest, + SpeedyRequest speedyRq, InetSocketAddress server) + throws IOException { + speedyRq.setOriginalRequest(originalRequest); + 
this.speedy.sendRequest(speedyRq, server); + } + @Override + public void sendUDP(ReusableBuffer data, InetSocketAddress receiver) { + } + @Override + public void shutdown() { + } + @Override + public OSDConfig getConfig() { + // TODO Auto-generated method stub + return null; + } + @Override + public DIRClient getDIRClient() { + // TODO Auto-generated method stub + return null; + } + } + + private class TestMultiSpeedy extends MultiSpeedy { + SpeedyResponseListener listener; + + public TestMultiSpeedy() throws IOException { + super(); + // TODO Auto-generated constructor stub + } + @Override + public void registerListener(SpeedyResponseListener rl, + InetSocketAddress server) { + // TODO Auto-generated method stub + this.listener = rl; + } + @Override + public void sendRequest(SpeedyRequest rq, InetSocketAddress server) + throws IOException, IllegalStateException { + // TODO Auto-generated method stub + rq.listener.receiveRequest(rq); + } + } + + private class DummyStage extends Stage { + public DummyStage() { super("DummyStage"); } + @Override + protected void processMethod(StageMethod method) { + method.getCallback().methodExecutionCompleted(method.getRq(), StageResponseCode.OK); + } + } +} diff --git a/servers/test/org/xtreemfs/test/scrubber/AsyncScrubberTest.java b/servers/test/org/xtreemfs/test/scrubber/AsyncScrubberTest.java new file mode 100644 index 0000000000000000000000000000000000000000..f863a03ae691674f1ce1cbb1671ce5d12c70b498 --- /dev/null +++ b/servers/test/org/xtreemfs/test/scrubber/AsyncScrubberTest.java @@ -0,0 +1,202 @@ +package org.xtreemfs.test.scrubber; + +import java.io.File; +import java.net.InetSocketAddress; + +import junit.framework.TestCase; +import junit.textui.TestRunner; + +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.buffer.BufferPool; +import org.xtreemfs.common.clients.io.RandomAccessFile; +import org.xtreemfs.common.clients.mrc.MRCClient; +import org.xtreemfs.common.clients.scrubber.AsyncScrubber; 
+import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.common.util.FSUtils; +import org.xtreemfs.dir.DIRConfig; +import org.xtreemfs.foundation.speedy.MultiSpeedy; +import org.xtreemfs.mrc.MRCConfig; +import org.xtreemfs.mrc.RequestController; +import org.xtreemfs.osd.OSD; +import org.xtreemfs.osd.OSDConfig; +import org.xtreemfs.test.SetupUtils; + +public class AsyncScrubberTest extends TestCase { + + private RequestController mrc1; + + private org.xtreemfs.dir.RequestController dirService; + + private MRCConfig mrcCfg1; + + private OSDConfig osdConfig1, osdConfig2; + + private DIRConfig dsCfg; + + private OSD osd1, osd2; + + private InetSocketAddress mrc1Address; + + private InetSocketAddress dirAddress; + + private MRCClient client; + + private String authString; + + private long accessMode; + + private String volumeName; + + private AsyncScrubber scrubber; + + public AsyncScrubberTest() { + Logging.start(Logging.LEVEL_WARN); + } + + public void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." 
+ getName()); + + authString = NullAuthProvider.createAuthString(System.getProperty("user.name"), MRCClient + .generateStringList(System.getProperty("user.name"))); + + accessMode = 511; // rwxrwxrwx + + dsCfg = SetupUtils.createDIRConfig(); + dirAddress = SetupUtils.getDIRAddr(); + + mrcCfg1 = SetupUtils.createMRC1Config(); + mrc1Address = SetupUtils.getMRC1Addr(); + + osdConfig1 = SetupUtils.createOSD1Config(); + osdConfig2 = SetupUtils.createOSD2Config(); + // cleanup + File testDir = new File(SetupUtils.TEST_DIR); + + FSUtils.delTree(testDir); + testDir.mkdirs(); + + // start the Directory Service + dirService = new org.xtreemfs.dir.RequestController(dsCfg); + dirService.startup(); + + // start the OSD + osd1 = new OSD(osdConfig1); + osd2 = new OSD(osdConfig2); + // start MRC + mrc1 = new RequestController(mrcCfg1); + mrc1.startup(); + + client = SetupUtils.createMRCClient(10000000); + + volumeName = "testVolume"; + + // create a volume (no access control) + client.createVolume(mrc1Address, volumeName, authString); + + // create some files and directories + client.createDir(mrc1Address, volumeName + "/myDir", authString); + client.createDir(mrc1Address, volumeName + "/anotherDir", authString); + client.createDir(mrc1Address, volumeName + "/yetAnotherDir", authString); + + for (int i = 0; i < 2; i++) + client.createFile(mrc1Address, volumeName + "/myDir/test" + i + ".txt", null, null, + accessMode, authString); + + client.createFile(mrc1Address, volumeName + "/test10.txt", null, null, accessMode, + authString); + + client.createFile(mrc1Address, volumeName + "/anotherDir/test11.txt", null, null, + accessMode, authString); + RandomAccessFile randomAccessFile1 = new RandomAccessFile("r", mrc1Address, volumeName + + "/anotherDir/test11.txt", client.getSpeedy(), authString); + RandomAccessFile randomAccessFile2 = new RandomAccessFile("r", mrc1Address, volumeName + + "/test10.txt", client.getSpeedy(), authString); + + String content = ""; + for (int i = 0; i < 6000; 
i++) + content = content.concat("Hello World "); + byte[] bytesIn = content.getBytes(); + assertEquals(bytesIn.length, 72000); + + int length = bytesIn.length; + + randomAccessFile1.write(bytesIn, 0, length); + + randomAccessFile2.write(bytesIn, 0, 65536); + } + + public void tearDown() throws Exception { + mrc1.shutdown(); + client.shutdown(); + osd1.shutdown(); + osd2.shutdown(); + scrubber.shutdown(); + dirService.shutdown(); + scrubber.waitForShutdown(); + client.waitForShutdown(); + + Logging.logMessage(Logging.LEVEL_DEBUG, this, BufferPool.getStatus()); + } + + public void testAsyncScrubber() throws Exception { + + scrubber = new AsyncScrubber(client.getSpeedy(), dirAddress, mrc1Address, volumeName, true, 2, 2, null); + scrubber.updateFileSize(volumeName + "/myDir/test0.txt", 10); + scrubber.start(); + + // file size corrected from 10 to 0 + assertEquals("0", client.stat(mrc1Address, volumeName + "/myDir/test0.txt", false, true, + false, authString).get("size").toString()); + assertNotNull(client.getXAttr(mrc1Address, volumeName + "/myDir/test0.txt", + AsyncScrubber.latestScrubAttr, authString)); + // file size same as before + assertEquals("0", client.stat(mrc1Address, volumeName + "/myDir/test1.txt", false, true, + false, authString).get("size").toString()); + assertNotNull(client.getXAttr(mrc1Address, volumeName + "/myDir/test0.txt", + AsyncScrubber.latestScrubAttr, authString)); + // file size corrected from 0 to 72000 (this file is stored in two + // objects) + assertEquals("72000", client.stat(mrc1Address, volumeName + "/anotherDir/test11.txt", + false, true, false, authString).get("size").toString()); + assertNotNull(client.getXAttr(mrc1Address, volumeName + "/myDir/test0.txt", + AsyncScrubber.latestScrubAttr, authString)); + // file size corrected from 0 to 65536, which is the stripe size. 
+ assertEquals("65536", client.stat(mrc1Address, volumeName + "/test10.txt", false, true, + false, authString).get("size").toString()); + assertNotNull(client.getXAttr(mrc1Address, volumeName + "/myDir/test0.txt", + AsyncScrubber.latestScrubAttr, authString)); + + long testVolume = Long.valueOf(client.getXAttr(mrc1Address, volumeName, + AsyncScrubber.latestScrubAttr, authString).toString()); + long test0 = Long.valueOf(client.getXAttr(mrc1Address, volumeName + "/myDir/test0.txt", + AsyncScrubber.latestScrubAttr, authString).toString()); + long test1 = Long.valueOf(client.getXAttr(mrc1Address, volumeName + "/myDir/test1.txt", + AsyncScrubber.latestScrubAttr, authString).toString()); + long myDir = Long.valueOf(client.getXAttr(mrc1Address, volumeName + "/myDir", + AsyncScrubber.latestScrubAttr, authString).toString()); + long anotherDir = Long.valueOf(client.getXAttr(mrc1Address, volumeName + "/anotherDir", + AsyncScrubber.latestScrubAttr, authString).toString()); + long test11 = Long.valueOf(client.getXAttr(mrc1Address, + volumeName + "/anotherDir/test11.txt", AsyncScrubber.latestScrubAttr, authString) + .toString()); + long test10 = Long.valueOf(client.getXAttr(mrc1Address, volumeName + "/test10.txt", + AsyncScrubber.latestScrubAttr, authString).toString()); + long yetAnotherDir = Long.valueOf(client.getXAttr(mrc1Address, + volumeName + "/yetAnotherDir", AsyncScrubber.latestScrubAttr, authString).toString()); + + assertTrue(testVolume >= myDir); + assertTrue(testVolume >= anotherDir); + assertTrue(testVolume >= yetAnotherDir); + assertTrue(testVolume >= test10); + assertTrue(anotherDir == test11); + assertTrue(myDir >= test0); + assertTrue(myDir >= test1); + + } + + public static void main(String[] args) { + TestRunner.run(AsyncScrubberTest.class); + } + +} \ No newline at end of file diff --git a/servers/test/org/xtreemfs/test/scrubber/ScrubbedFileTest.java b/servers/test/org/xtreemfs/test/scrubber/ScrubbedFileTest.java new file mode 100644 index 
0000000000000000000000000000000000000000..21fdb6323bcf65776f76fafd709590546fa2aab9 --- /dev/null +++ b/servers/test/org/xtreemfs/test/scrubber/ScrubbedFileTest.java @@ -0,0 +1,79 @@ +package org.xtreemfs.test.scrubber; + +import junit.framework.TestCase; +import org.junit.After; +import org.xtreemfs.common.clients.scrubber.FileState; +import org.xtreemfs.common.logging.Logging; + +public class ScrubbedFileTest extends TestCase{ + + public ScrubbedFileTest() { + Logging.start(Logging.LEVEL_WARN); + } + + public void setUp() throws Exception { + + System.out.println("TEST: " + getClass().getSimpleName() + "." + + getName()); + } + + @After + public void tearDown() throws Exception { + + } + + public void testFileState() throws Exception { + FileState state = new FileState(10,3); + for(int i = 0; i < 3; i++) + assertEquals(state.isTodo(i), true); + + state.markObjectAsInFlight(0); + state.markObjectAsInFlight(1); + state.incorporateReadResult(0, 5); + assertEquals(state.isFileDone(), true); + state.incorporateReadResult(1, 0); + assertEquals(state.isFileDone(), true); + assertEquals(state.getFileSize(), 5); + + state = new FileState(10,3); + + state.markObjectAsInFlight(0); + state.markObjectAsInFlight(1); + state.incorporateReadResult(1, 0); + assertEquals(state.isFileDone(), false); + state.incorporateReadResult(0, 5); + assertEquals(state.isFileDone(), true); + assertEquals(state.getFileSize(), 5); + + state = new FileState(10,3); + + state.markObjectAsInFlight(0); + state.markObjectAsInFlight(1); + state.incorporateReadResult(0, 10); + assertEquals(state.isFileDone(), false); + state.incorporateReadResult(1, 0); + assertEquals(state.isFileDone(), true); + assertEquals(state.getFileSize(), 10); + + state = new FileState(10,3); + + state.markObjectAsInFlight(0); + state.markObjectAsInFlight(1); + state.incorporateReadResult(1, 0); + assertEquals(state.isFileDone(), false); + state.incorporateReadResult(0, 10); + assertEquals(state.isFileDone(), true); + 
assertEquals(state.getFileSize(), 10); + + state = new FileState(10,3); + + state.markObjectAsInFlight(1); + state.incorporateReadResult(1, 5); + assertEquals(state.isFileDone(), false); + state.markObjectAsInFlight(0); + state.incorporateReadResult(0, 10); + assertEquals(state.isFileDone(), true); + assertEquals(state.getFileSize(), 15); + } + +} diff --git a/servers/xtreemos/bcprov-jdk16-139.jar b/servers/xtreemos/bcprov-jdk16-139.jar new file mode 100644 index 0000000000000000000000000000000000000000..8b79930545bfcb540d60ed2a2a61bc6e81f5519b Binary files /dev/null and b/servers/xtreemos/bcprov-jdk16-139.jar differ diff --git a/servers/xtreemos/cdaclient.jar b/servers/xtreemos/cdaclient.jar new file mode 100644 index 0000000000000000000000000000000000000000..49a0cb88c15d45b088a03997e1399962877f33ef Binary files /dev/null and b/servers/xtreemos/cdaclient.jar differ diff --git a/servers/xtreemos/org/xtreemos/XtreemOSAuthProvider.java b/servers/xtreemos/org/xtreemos/XtreemOSAuthProvider.java new file mode 100644 index 0000000000000000000000000000000000000000..0852b74f1a3dff28f30c8a5c5e76d3fa41670d00 --- /dev/null +++ b/servers/xtreemos/org/xtreemos/XtreemOSAuthProvider.java @@ -0,0 +1,108 @@ +/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin. + + This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based + Grid Operating System, see for more details. + The XtreemOS project has been developed with the financial support of the + European Commission's IST program under contract #FP6-033576. + + XtreemFS is free software: you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation, either version 2 of the License, or (at your option) + any later version. + + XtreemFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with XtreemFS. If not, see . + */ +/* + * AUTHORS: Bjoern Kolbeck (ZIB) + */ +package org.xtreemos; + +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.List; + +import org.xtreemfs.common.auth.AuthenticationException; +import org.xtreemfs.common.auth.AuthenticationProvider; +import org.xtreemfs.common.auth.NullAuthProvider; +import org.xtreemfs.common.auth.UserCredentials; +import org.xtreemfs.common.logging.Logging; +import org.xtreemfs.foundation.pinky.channels.ChannelIO; +import org.xtreemos.wp35.VO; +import org.xtreemos.wp35.util.CertificateProcessor; + +/** + * authentication provider for XOS certificates. + * @author bjko + */ +public class XtreemOSAuthProvider implements AuthenticationProvider { + + private NullAuthProvider nullAuth; + + public UserCredentials getEffectiveCredentials(String authHeader, ChannelIO channel) throws AuthenticationException { + //use cached info! 
+ assert(nullAuth != null); + if (channel.getAttachment() != null) { + Logging.logMessage(Logging.LEVEL_DEBUG,this,"using attachment..."); + final Object[] cache = (Object[])channel.getAttachment(); + final Boolean serviceCert = (Boolean)cache[0]; + if (serviceCert) { + Logging.logMessage(Logging.LEVEL_DEBUG,this,"service cert..."); + return nullAuth.getEffectiveCredentials(authHeader, channel); + } else { + Logging.logMessage(Logging.LEVEL_DEBUG,this,"using cached creds: "+cache[1]); + return (UserCredentials)cache[1]; + } + } + //parse cert if no cached info is present + try { + final Certificate[] certs = channel.getCerts(); + if (certs.length > 0) { + CertificateProcessor cp = new CertificateProcessor(); + byte[] content = ((X509Certificate) certs[0]).getExtensionValue(VO.Attribute.GlobalUserID.getOID()); + if (content == null) { + Logging.logMessage(Logging.LEVEL_DEBUG, this, "XOS-Service cert present"); + channel.setAttachment(new Object[]{new Boolean(true)}); + //use NullAuth in this case to parse JSON header + return nullAuth.getEffectiveCredentials(authHeader, null); + } else { + + final String globalUID = cp.getVOAttributeValue((X509Certificate) certs[0], VO.Attribute.GlobalUserID); + final String globalGID = cp.getVOAttributeValue((X509Certificate) certs[0], VO.Attribute.GlobalPrimaryGroupName); + final String secondaryGroupNames = cp.getVOAttributeValue((X509Certificate) certs[0], VO.Attribute.GlobalSecondaryGroupNames); + final String[] groupList = (secondaryGroupNames == null) ? 
new String[]{} : secondaryGroupNames.split(","); + List gids = new ArrayList(groupList.length+1); + gids.add(globalGID); + for (final String gid:groupList) + gids.add(gid); + Logging.logMessage(Logging.LEVEL_DEBUG, this, "XOS-User cert present: " + globalUID + "," + globalGID+ "secondary grps: "+gids); + + boolean isSuperUser = gids.contains("VOAdmin"); + final UserCredentials creds = new UserCredentials(globalUID, gids, isSuperUser); + channel.setAttachment(new Object[]{new Boolean(false),creds}); + return creds; + } + } else { + throw new AuthenticationException("no XOS-certificates present"); + } + } catch (Exception ex) { + Logging.logMessage(Logging.LEVEL_ERROR, this, ex); + throw new AuthenticationException("invalid credentials "+ex); + } + + } + + public void initialize(boolean useSSL) throws RuntimeException { + if (!useSSL) { + throw new RuntimeException(this.getClass().getName() + " can only be used if use_ssl is enabled!"); + } + nullAuth = new NullAuthProvider(); + nullAuth.initialize(useSSL); + } +}