diff --git a/servers/build.xml b/servers/build.xml
new file mode 100644
index 0000000000000000000000000000000000000000..aa5a87717eaf7c654d92d8ba598f8459c1a725e5
--- /dev/null
+++ b/servers/build.xml
@@ -0,0 +1,69 @@
+ Builds, tests, and runs the project XtreemFS.
diff --git a/servers/config/dirconfig.properties b/servers/config/dirconfig.properties
new file mode 100644
index 0000000000000000000000000000000000000000..5c608ae51476e481cd4857ed5bcffe28303ba172
--- /dev/null
+++ b/servers/config/dirconfig.properties
@@ -0,0 +1,32 @@
+# debug level (0 = error, 1 = warn, 2 = info, 3 = debug, 4 = trace)
+debug_level = 1
+
+# port for the service to listen on
+listen.port = 32638
+
+# optional address for network device ("any" if not specified)
+# listen.address = 127.0.0.1
+
+# directory containing the database
+database.dir = /var/lib/xtreemfs/dir/database
+
+# specify whether SSL is required
+ssl.enabled = false
+
+# server credentials for SSL handshakes
+ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/ds.p12
+ssl.service_creds.pw = xtreemfs
+ssl.service_creds.container = pkcs12
+
+# trusted certificates for SSL handshakes
+ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/xosrootca.jks
+ssl.trusted_certs.pw = xtreemfs
+ssl.trusted_certs.container = jks
+
+# Authentication providers are used to retrieve the user identities
+# from the client or from certificates.
+# The default provider is org.xtreemfs.common.auth.NullAuthProvider,
+# which just takes the information provided by the client.
+authentication_provider = org.xtreemfs.common.auth.NullAuthProvider
+
+uuid = http://localhost:32638
\ No newline at end of file
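The DIR, MRC and OSD configuration files all use plain java.util.Properties syntax, so they can be read with the JDK alone. The following is a minimal, hypothetical loader sketch; the class name, the chosen keys and the printed summary are illustrative assumptions, not part of the XtreemFS code base.

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.util.Properties;

    // Illustrative only: reads a few keys from dirconfig.properties.
    public class DirConfigExample {

        public static void main(String[] args) throws IOException {
            Properties p = new Properties();
            FileInputStream in = new FileInputStream("config/dirconfig.properties");
            try {
                p.load(in);
            } finally {
                in.close();
            }

            int debugLevel = Integer.parseInt(p.getProperty("debug_level", "1").trim());
            int listenPort = Integer.parseInt(p.getProperty("listen.port", "32638").trim());
            boolean sslEnabled = Boolean.parseBoolean(p.getProperty("ssl.enabled", "false").trim());

            System.out.println("DIR on port " + listenPort + ", debug level " + debugLevel
                    + ", SSL " + (sslEnabled ? "enabled" : "disabled"));
        }
    }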
diff --git a/servers/config/mrcconfig.properties b/servers/config/mrcconfig.properties
new file mode 100644
index 0000000000000000000000000000000000000000..8eb1c74b98f1dd49a2697fef9221cb0c6d71cbfd
--- /dev/null
+++ b/servers/config/mrcconfig.properties
@@ -0,0 +1,83 @@
+# debug level (0 = error, 1 = warn, 2 = info, 3 = debug, 4 = trace)
+debug_level = 1
+
+# port for the service to listen on
+listen.port = 32636
+
+# optional address for network device, "any" if not specified
+# listen.address = 127.0.0.1
+
+# interval for querying the Directory Service for new OSDs
+osd_check_interval = 10
+
+# Directory Service endpoint
+dir_service.host = localhost
+dir_service.port = 32638
+
+# directory for append log
+database.log = /var/lib/xtreemfs/mrc/db-log
+
+# directory for volume databases
+database.dir = /var/lib/xtreemfs/mrc/database
+
+# specify whether access time stamps are updated
+no_atime = true
+
+# granularity of the local clock (in ms)
+local_clock_renewal = 50
+
+# interval between two remote clock syncs (in ms)
+remote_time_sync = 60000
+
+# specify whether SSL is required
+ssl.enabled = false
+
+# server credentials for SSL handshakes
+ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/mrc.p12
+ssl.service_creds.pw = xtreemfs
+ssl.service_creds.container = pkcs12
+
+# trusted certificates for SSL handshakes
+ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/xosrootca.jks
+ssl.trusted_certs.pw = xtreemfs
+ssl.trusted_certs.container = jks
+
+# time span between two database checkpoint attempts (in ms)
+database.checkpoint.interval = 1800000
+
+# time span for which no requests must have been received to create a checkpoint (in ms)
+database.checkpoint.idle_interval = 1000
+
+# minimum size in bytes the log file must have to create a checkpoint
+database.checkpoint.logfile_size = 16384
+
+# Authentication providers are used to retrieve the user identities
+# from the client or from certificate.
+# The default provider is org.xtreemfs.common.auth.NullAuthProvider, which just
+# takes the information provided by the client. The name of a pluggable
+# provider can be used here.
+authentication_provider = org.xtreemfs.common.auth.NullAuthProvider
+
+# Optional directory containing deployable MRC policy implementations.
+# Policies can be directly deployed as .java or .class files in this directory
+# or one of its subdirectories. They will be compiled at startup time and
+# loaded at runtime. Policies may have external dependencies that can be
+# deployed either as .java, .class or .jar files. While Java and Class files
+# may be located in subdirectories, JAR files mustn't. So far, pluggable
+# policies have to inherit from either org.xtreemfs.mrc.ac.FileAccessPolicy,
+# org.xtreemfs.mrc.osdstatus.OSDSelectionPolicy, or
+# org.xtreemfs.common.auth.AuthenticationProvider. Policies identified by
+# policy IDs (OSDSelectionPolicy and FileAccessPolicy) require a public static
+# long field called POLICY_ID that assigns the policy a unique number.
+policy_dir = /etc/xos/xtreemfs/policies
+
+# Shared secret between the MRC and all OSDs.
+# The secret is used by the MRC to sign capabilities, i.e. security tokens for
+# data access at OSDs. In turn, an OSD uses the secret to verify that the
+# capability has been issued by the MRC. The shared secret will be replaced by
+# a public key infrastructure in future releases.
+capability_secret = secretPassphrase
+
+# UUID for the MRC
+# IMPORTANT: replace 'localhost' with a reachable host name/IP address
+uuid = http://localhost:32636
\ No newline at end of file
diff --git a/servers/config/osdconfig.properties b/servers/config/osdconfig.properties
new file mode 100644
index 0000000000000000000000000000000000000000..34ca5567fc240ce70935103902b37d3df82c3df2
--- /dev/null
+++ b/servers/config/osdconfig.properties
@@ -0,0 +1,56 @@
+# debug level (0 = error, 1 = warn, 2 = info, 3 = debug, 4 = trace)
+debug_level = 1
+
+# port for the service to listen on
+listen.port = 32640
+
+# optional address for network device, "any" if not specified
+# listen.address = 127.0.0.1
+
+# Directory Service endpoint
+dir_service.host = localhost
+dir_service.port = 32638
+
+# directory containing XtreemFS file content
+object_dir = /var/lib/xtreemfs/objs/
+
+# granularity of the local clock (in ms)
+local_clock_renewal = 50
+
+# interval between two remote clock syncs (in ms)
+remote_time_sync = 60000
+
+# specify whether SSL is required
+ssl.enabled = false
+
+# server credentials for SSL handshakes
+ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/osd.p12
+ssl.service_creds.pw = xtreemfs
+ssl.service_creds.container = pkcs12
+
+# trusted certificates for SSL handshakes
+ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/xosrootca.jks
+ssl.trusted_certs.pw = xtreemfs
+ssl.trusted_certs.container = jks
+
+report_free_space = true
+
+# specify whether internal OSD checksums are required
+# if the flag is set to true, the OSD will calculate checksums for
+# newly created objects, which will be checked when the object is read
+checksums.enabled = false
+
+# algorithm used for checksum calculation
+# by default, Adler32, CRC32, MD5 and SHA-1 are supported
+checksums.algorithm = Adler32
+
+# Shared secret between the MRC and all OSDs.
+# The secret is used by the MRC to sign capabilities, i.e. security tokens for
+# data access at OSDs. In turn, an OSD uses the secret to verify that the
+# capability has been issued by the MRC. The shared secret will be replaced by
+# a public key infrastructure in future releases.
+capability_secret = secretPassphrase
+
+# UUID for the OSD
+# IMPORTANT: replace 'localhost' with a reachable host name/IP address
+uuid = http://localhost:32640
\ No newline at end of file
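The checksum algorithms listed above (Adler32, CRC32, MD5 and SHA-1) are all available through the standard JDK. The sketch below only illustrates what checksums.enabled implies for a newly written object; the helper class is an assumption made for illustration, not the OSD's actual checksum code.

    import java.util.zip.Adler32;

    // Illustrative only: computes an Adler32 checksum over an object's byte buffer,
    // as an OSD would do for newly created objects when checksums are enabled.
    public class ObjectChecksumExample {

        static long adler32Of(byte[] objectData) {
            Adler32 adler = new Adler32();
            adler.update(objectData, 0, objectData.length);
            return adler.getValue();
        }

        public static void main(String[] args) {
            byte[] object = "example object content".getBytes();
            System.out.println("Adler32 = " + Long.toHexString(adler32Of(object)));
        }
    }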
diff --git a/servers/lib/hsqldb.jar b/servers/lib/hsqldb.jar
new file mode 100644
index 0000000000000000000000000000000000000000..dc3055e4463b389c9d6170b1f01950af9cd1cf3a
Binary files /dev/null and b/servers/lib/hsqldb.jar differ
diff --git a/servers/lib/test/commons-codec-1.3.jar b/servers/lib/test/commons-codec-1.3.jar
new file mode 100644
index 0000000000000000000000000000000000000000..957b6752af9a60c1bb2a4f65db0e90e5ce00f521
Binary files /dev/null and b/servers/lib/test/commons-codec-1.3.jar differ
diff --git a/servers/lib/test/commons-httpclient-3.0.1-contrib.jar b/servers/lib/test/commons-httpclient-3.0.1-contrib.jar
new file mode 100644
index 0000000000000000000000000000000000000000..4fa5f5d2bfe8cef1e6d75347ae58e0de1d7fd8ab
Binary files /dev/null and b/servers/lib/test/commons-httpclient-3.0.1-contrib.jar differ
diff --git a/servers/lib/test/commons-httpclient-3.0.1.jar b/servers/lib/test/commons-httpclient-3.0.1.jar
new file mode 100644
index 0000000000000000000000000000000000000000..cfc777c71d600a90001b7b2dcd68993d0977b0cb
Binary files /dev/null and b/servers/lib/test/commons-httpclient-3.0.1.jar differ
diff --git a/servers/lib/test/commons-logging-1.1.jar b/servers/lib/test/commons-logging-1.1.jar
new file mode 100644
index 0000000000000000000000000000000000000000..2ff9bbd90d63f92cdffea944869ed9bea7ead49c
Binary files /dev/null and b/servers/lib/test/commons-logging-1.1.jar differ
diff --git a/servers/lib/test/junit-4.3.1.jar b/servers/lib/test/junit-4.3.1.jar
new file mode 100644
index 0000000000000000000000000000000000000000..ff5d1888fc7a8b1501711594c70dc80fe07dce29
Binary files /dev/null and b/servers/lib/test/junit-4.3.1.jar differ
diff --git a/servers/man/man1/xtfs_cleanup.1 b/servers/man/man1/xtfs_cleanup.1
new file mode 100644
index 0000000000000000000000000000000000000000..98e64ed200ea687936bebb91a4292e01567ef577
--- /dev/null
+++ b/servers/man/man1/xtfs_cleanup.1
@@ -0,0 +1,48 @@
+.TH xtfs_cleanup 1 "September 2008" "The XtreemFS Distributed File System" "XtreemFS server"
+.SH NAME
+xtfs_cleanup \- checks, for each file stored on an OSD, whether it has an entry at the Metadata Server (MRC).
+.SH SYNOPSIS
+\fBxtfs_cleanup [ \fIoptions\fB ] \fIuuid:<osd_uuid>
+.br
+
+.SH DESCRIPTION
+.I xtfs_cleanup
+performs a check for each file on the given Object Storage Device (OSD) of whether it is registered at an MRC.
+It returns a list of the files that can safely be deleted.
+
+.SH EXAMPLE USAGE
+.B "xtfs_cleanup [options] uuid:<osd_uuid>"
+.PP
+Cleans up the OSD with the given UUID.
+
+.SH OPTIONS
+.TP
+\fB-h\fP show usage info
+.TP
+\fB-v\fP verbose mode - no listing of file details
+.TP
+\fB-r\fP restore mode - all files will be restored to the directory '/Lost+Found/' on the volume where they are located
+.TP
+\fB-e\fP erase mode - all files will be removed without further confirmation!
+.TP
+\fB-d\fP directory service (DIR) to use (e.g. 'http://localhost:32638')
+If no DIR URI is specified, URI and security settings are taken from '/etc/xos/xtreemfs/default_dir'.
+
+
+
+.SH "SEE ALSO"
+.BR xtfs_mount (1),
+.BR xtfs_umount (1),
+.BR xtfs_showmount (1),
+.BR xtfs_mkvol (1),
+.BR xtfs_rmvol (1),
+.BR xtfs_lsvol (1),
+.BR xtfs_stat (1),
+.BR xtfs_sp (1),
+.BR xtfs_mrcdbtool (1),
+.BR xtfs_scrub (1)
+.BR
+
+.SH AVAILABILITY
+The xtfs_cleanup command is part of the XtreemFS-server package and is available from http://www.XtreemFS.org.
diff --git a/servers/man/man1/xtfs_mrcdbtool.1 b/servers/man/man1/xtfs_mrcdbtool.1
new file mode 100644
index 0000000000000000000000000000000000000000..30ac2f9370adb9e1481a35db7abd50d2474492df
--- /dev/null
+++ b/servers/man/man1/xtfs_mrcdbtool.1
@@ -0,0 +1,55 @@
+.TH xtfs_mrcdbtool 1 "July 2008" "The XtreemFS Distributed File System" "XtreemFS server"
+.SH NAME
+xtfs_mrcdbtool \- dump and restore MRC databases.
+.SH SYNOPSIS
+\fBxtfs_mrcdbtool [\fIoptions\fB] \fIdump|restore dump_file
+.br
+
+.SH DESCRIPTION
+.I xtfs_mrcdbtool
+dumps an MRC database to a file, or restores an MRC database from a dump file. An XML dump of the MRC database is created if the \fIdump\fP parameter is present. When dumping an MRC database, the XML file containing the dump will be created on the server at the path \fIdump_file\fP. Dumps can be restored by using the \fIrestore\fP parameter. For safety reasons, this is only possible if the target MRC does not have a database yet.
+
+.SH EXAMPLE USAGE
+.B "xtfs_mrcdbtool -mrc http://localhost:32636 dump /tmp/dump.xml"
+.PP
+Dumps the database of the MRC running on \fIlocalhost:32636\fP to \fI/tmp/dump.xml\fP
+
+.B "xtfs_mrcdbtool -mrc http://localhost:32636 restore /tmp/dump.xml"
+.PP
+Restores the database of the MRC running on \fIlocalhost:32636\fP from the dump in \fI/tmp/dump.xml\fP
+
+.SH OPTIONS
+.TP
+.B \-h
+Print help.
+.TP
+.B \-mrc \fImrc_url
+The URL of the MRC, e.g. http://localhost:32636.
+.TP
+.B \-c \fIcreds_file
+Path to a PKCS#12 credentials file (private key + certificate) to use for SSL authentication. Must be present if the MRC URL starts with https://.
+.TP
+.B \-cp \fIcreds_passphrase
+An optional passphrase to access the credentials file.
+.TP
+.B \-t \fItrusted_CAs
+Path to a PKCS#12 file containing a set of certificates from trusted certification authorities. These certificates will be used to authenticate the MRC. Must be present if the MRC URL starts with https://.
+.TP
+.B \-tp \fItrusted_passphrase
+An optional passphrase to access the truststore file.
+.RE
+
+.SH "SEE ALSO"
+.BR xtfs_mount (1),
+.BR xtfs_umount (1),
+.BR xtfs_showmount (1),
+.BR xtfs_mkvol (1),
+.BR xtfs_rmvol (1),
+.BR xtfs_lsvol (1),
+.BR xtfs_stat (1),
+.BR xtfs_sp (1),
+.BR xtfs_scrub (1)
+.BR
+
+.SH AVAILABILITY
+The xtfs_mrcdbtool command is part of the XtreemFS-server package and is available from http://www.XtreemFS.org
\ No newline at end of file
diff --git a/servers/man/man1/xtfs_scrub.1 b/servers/man/man1/xtfs_scrub.1
new file mode 100644
index 0000000000000000000000000000000000000000..9e9c41ac286cff09bb58e7a52470aa917836d213
--- /dev/null
+++ b/servers/man/man1/xtfs_scrub.1
@@ -0,0 +1,54 @@
+.TH xtfs_scrub 1 "July 2008" "The XtreemFS Distributed File System" "XtreemFS server"
+.SH NAME
+xtfs_scrub \- checks, for each file in a volume, whether the MRC file size is outdated and whether the checksum is correct.
+.SH SYNOPSIS
+\fBxtfs_scrub [ \fIoptions\fB ] \fI<volume_name>\fB | \fIuuid:<volume_uuid>
+.br
+
+.SH DESCRIPTION
+.I xtfs_scrub
+performs a consistency check of each file in a given volume. The check compares the file size stored as part of the metadata to the actual file size defined by all objects of the file. If the metadata file size is outdated, it is corrected, provided that the \fI\-chk\fP switch is not given. In addition, an error is reported if the file's checksum is not correct. The volume to scrub can be specified either by its name or by a UUID ('uuid:<volume_uuid>').
+
+.SH EXAMPLE USAGE
+.B "xtfs_scrub -dir http://localhost:32638 myVolume"
+.PP
+Scrubs all files in the volume named 'myVolume' registered at the Directory Service 'http://localhost:32638'.
+
+.SH OPTIONS
+.TP
+\fB-h\fP show usage info
+.TP
+\fB-dir\fP directory service to use (e.g. 'http://localhost:32638')
+If no URI is specified, URI and security settings are taken from '/etc/xos/xtreemfs/default_dir'
+In case of a secured URI ('https://...'), it is necessary to also specify SSL settings (-c, -cp, -t, -tp).
+.TP
+\fB-c\fP a PKCS#12 file containing user credentials
+.TP
+\fB-cp\fP a pass phrase to decrypt the user credentials file
+.TP
+\fB-t\fP a PKCS#12 file containing a set of certificates from trusted CAs
+.TP
+\fB-tp\fP a pass phrase to decrypt the trusted CAs file
+.TP
+\fB\-chk\fP check only (do not update file sizes on the MRC in case of inconsistencies)
+.TP
+\fB\-cons\fP n number of connections per OSD (default=10)
+.TP
+\fB\-files\fP n number of files to fetch at once from MRC (default=100)
+
+
+.SH "SEE ALSO"
+.BR xtfs_mount (1),
+.BR xtfs_umount (1),
+.BR xtfs_showmount (1),
+.BR xtfs_mkvol (1),
+.BR xtfs_rmvol (1),
+.BR xtfs_lsvol (1),
+.BR xtfs_stat (1),
+.BR xtfs_sp (1),
+.BR xtfs_mrcdbtool (1)
+.BR
+
+.SH AVAILABILITY
+The xtfs_scrub command is part of the XtreemFS-server package and is available from http://www.XtreemFS.org.
diff --git a/servers/nbproject/build-impl.xml b/servers/nbproject/build-impl.xml
new file mode 100644
index 0000000000000000000000000000000000000000..ca3d10a2280de081cca66f8118866555991f571e
--- /dev/null
+++ b/servers/nbproject/build-impl.xml
@@ -0,0 +1,627 @@
+ Must set src.dir
+ Must set test.src.dir
+ Must set build.dir
+ Must set dist.dir
+ Must set build.classes.dir
+ Must set dist.javadoc.dir
+ Must set build.test.classes.dir
+ Must set build.test.results.dir
+ Must set build.classes.excludes
+ Must set dist.jar
+ Must set javac.includes
+ Must select some files in the IDE or set javac.includes
+ To run this application from the command line without Ant, try:
+ java -cp "${run.classpath.with.dist.jar}" ${main.class}
+ To run this application from the command line without Ant, try:
+ java -jar "${dist.jar.resolved}"
+ Must select one file in the IDE or set run.class
+ Must select one file in the IDE or set debug.class
+ Must set fix.includes
+ Must select some files in the IDE or set javac.includes
+ Some tests failed; see details above.
+ Must select some files in the IDE or set test.includes
+ Some tests failed; see details above.
+ Must select one file in the IDE or set test.class
+ Must select one file in the IDE or set applet.url
+ Must select one file in the IDE or set applet.url
diff --git a/servers/nbproject/genfiles.properties b/servers/nbproject/genfiles.properties
new file mode 100644
index 0000000000000000000000000000000000000000..7a45e2acae17a096edfdcacc55d970bb14ef1785
--- /dev/null
+++ b/servers/nbproject/genfiles.properties
@@ -0,0 +1,11 @@
+build.xml.data.CRC32=4a9eff70
+build.xml.script.CRC32=d0dcb2dc
+build.xml.stylesheet.CRC32=240b97a2
+# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml.
+# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you.
+nbproject/build-impl.xml.data.CRC32=4a9eff70
+nbproject/build-impl.xml.script.CRC32=b1628b34
+nbproject/build-impl.xml.stylesheet.CRC32=f1d9da08
+nbproject/profiler-build-impl.xml.data.CRC32=4a9eff70
+nbproject/profiler-build-impl.xml.script.CRC32=abda56ed
+nbproject/profiler-build-impl.xml.stylesheet.CRC32=a5b6598e
diff --git a/servers/nbproject/project.properties b/servers/nbproject/project.properties
new file mode 100644
index 0000000000000000000000000000000000000000..ee6aa55fe2b97c655d1078dfb781a69250c4395b
--- /dev/null
+++ b/servers/nbproject/project.properties
@@ -0,0 +1,79 @@
+application.args=
+application.title=XtreemFS
+application.vendor=bjko
+build.classes.dir=${build.dir}/classes
+build.classes.excludes=**/*.java,**/*.form
+# This directory is removed when the project is cleaned:
+build.dir=build
+build.generated.dir=${build.dir}/generated
+# Only compile against the classpath explicitly listed here:
+build.sysclasspath=ignore
+build.test.classes.dir=${build.dir}/test/classes
+build.test.results.dir=${build.dir}/test/results
+debug.classpath=\
+ ${run.classpath}
+debug.test.classpath=\
+ ${run.test.classpath}
+# This directory is removed when the project is cleaned:
+dist.dir=dist
+dist.jar=${dist.dir}/XtreemFS.jar
+dist.javadoc.dir=${dist.dir}/javadoc
+excludes=
+file.reference.bcprov-jdk16-139.jar=lib/bcprov-jdk16-139.jar
+file.reference.cdaclient.jar=lib/cdaclient.jar
+file.reference.commons-codec-1.3.jar=lib/test/commons-codec-1.3.jar
+file.reference.commons-httpclient-3.0.1-contrib.jar=lib/test/commons-httpclient-3.0.1-contrib.jar
+file.reference.commons-logging-1.1.jar=lib/test/commons-logging-1.1.jar
+file.reference.hsqldb.jar=lib/hsqldb.jar
+file.reference.junit-4.3.1.jar=lib/test/junit-4.3.1.jar
+file.reference.commons-httpclient-3.0.1.jar=lib/test/commons-httpclient-3.0.1.jar
+file.reference.config.jar=lib/config.jar
+file.reference.je-3.2.13.jar=lib/je-3.2.13.jar
+file.reference.xbean.jar=lib/xbean.jar
+includes=**
+jar.compress=false
+javac.classpath=\
+ ${file.reference.je-3.2.13.jar}:\
+ ${file.reference.hsqldb.jar}:\
+ ${file.reference.cdaclient.jar}:\
+ ${file.reference.bcprov-jdk16-139.jar}
+# Space-separated list of extra javac options
+javac.compilerargs=
+javac.deprecation=false
+javac.source=1.5
+javac.target=1.6
+javac.test.classpath=\
+ ${javac.classpath}:\
+ ${build.classes.dir}:\
+ ${file.reference.commons-httpclient-3.0.1.jar}:\
+ ${file.reference.commons-codec-1.3.jar}:\
+ ${file.reference.commons-logging-1.1.jar}:\
+ ${libs.junit.classpath}:\
+ ${file.reference.commons-httpclient-3.0.1-contrib.jar}
+javadoc.additionalparam=
+javadoc.author=false
+javadoc.encoding=
+javadoc.noindex=false
+javadoc.nonavbar=false
+javadoc.notree=false
+javadoc.private=false
+javadoc.splitindex=true
+javadoc.use=true
+javadoc.version=false
+javadoc.windowtitle=
+main.class=
+manifest.file=manifest.mf
+meta.inf.dir=${src.dir}/META-INF
+platform.active=default_platform
+run.classpath=\
+ ${javac.classpath}:\
+ ${build.classes.dir}
+# Space-separated list of JVM arguments used when running the project
+# (you may also define separate properties like run-sys-prop.name=value instead of -Dname=value
+# or test-sys-prop.name=value to set system properties for unit tests):
+run.jvmargs=-ea
+run.test.classpath=\
+ ${javac.test.classpath}:\
+ ${build.test.classes.dir}
+src.dir=src
+test.src.dir=test
diff --git a/servers/nbproject/project.xml b/servers/nbproject/project.xml
new file mode 100644
index 0000000000000000000000000000000000000000..dae09abe30923274de28f73f459466d1d1041828
--- /dev/null
+++ b/servers/nbproject/project.xml
@@ -0,0 +1,16 @@
+ org.netbeans.modules.java.j2seproject
+ XtreemFS
+ 1.6.5
diff --git a/servers/src/org/xtreemfs/common/Capability.java b/servers/src/org/xtreemfs/common/Capability.java
new file mode 100644
index 0000000000000000000000000000000000000000..0281279b13e83f971f6b1e94df4af22c29f5b181
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/Capability.java
@@ -0,0 +1,272 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and
+ Barcelona Supercomputing Center - Centro Nacional de Supercomputacion.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
+ */
+
+package org.xtreemfs.common;
+
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.List;
+
+import org.xtreemfs.common.logging.Logging;
+import org.xtreemfs.common.util.OutputUtils;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.json.JSONString;
+
+/**
+ * This class implements a Java representation of a capability.
+ *
+ * In general, a capability can be seen as a token granting the permission to
+ * carry out an operation on a remote server.
+ *
+ * When a client wants to open a file, the MRC checks whether the respective kind
+ * of access is granted. If so, the MRC sends a capability to the client, which
+ * in turn sends the capability to the OSD when file contents are accessed or
+ * modified. The OSD has to check whether the capability is valid. A capability
+ * is valid as long as it has a correct signature and has not expired yet.
+ * Capabilities can be renewed in order to extend their validity.
+ *
+ * Each capability contains a file ID, a string representing the access mode, an
+ * expiration time stamp representing the time in seconds from 1/1/1970, a
+ * string containing data that can be used to verify the client identity, as
+ * well as a signature added by the MRC.
+ *
+ *
+ * @author stender
+ *
+ */
+public class Capability {
+
+ /**
+ * default validity for capabilities in seconds
+ */
+ public static final long DEFAULT_VALIDITY = 10 * 60;
+
+ private final String fileId;
+
+ private final String accessMode;
+
+ private final long expires;
+
+ private final String sharedSecret;
+
+ private final String signature;
+
+ private long epochNo;
+
+ /**
+ * Creates a capability from a given set of data. The expiration time stamp
+ * will be generated automatically by means of the local system time, and a
+ * signature will be added. This constructor is meant to initially create a
+ * capability at the MRC.
+ *
+ * @param fileId
+ * the file ID
+ * @param accessMode
+ * the access mode
+ * @param epochNo
+ * the epoch number associated with the capability; epoch numbers
+ * are incremented each time the file is truncated or deleted
+ * @param sharedSecret
+ * the shared secret to be used to sign the capability
+ */
+ public Capability(String fileId, String accessMode, long epochNo, String sharedSecret) {
+
+ this.fileId = fileId;
+ this.accessMode = accessMode;
+ this.epochNo = epochNo;
+ this.sharedSecret = sharedSecret;
+
+ this.expires = System.currentTimeMillis() / 1000 + DEFAULT_VALIDITY;
+ this.signature = calcSignature();
+ }
+
+ /**
+ * Creates a capability from a given set of data. A signature will be added
+ * automatically. This constructor is meant to initially create a capability
+ * at the MRC.
+ *
+ * @param fileId
+ * the file ID
+ * @param accessMode
+ * the access mode
+ * @param expires
+ * the expiration time stamp
+ * @param epochNo
+ * the epoch number associated with the capability; epoch numbers
+ * are incremented each time the file is truncated or deleted
+ * @param sharedSecret
+ * the shared secret to be used to sign the capability
+ */
+ public Capability(String fileId, String accessMode, long expires, long epochNo,
+ String sharedSecret) {
+ this.fileId = fileId;
+ this.accessMode = accessMode;
+ this.expires = expires;
+ this.epochNo = epochNo;
+ this.sharedSecret = sharedSecret;
+
+ this.signature = calcSignature();
+ }
+
+ /**
+ * Creates a capability from a string representation. This constructor is
+ * meant to be used to verify the validity of a capability string received
+ * from a remote host.
+ *
+ * @param capability
+ * the capability string
+ * @param sharedSecret
+ * the shared secret to be used to verify the capability
+ * @throws JSONException
+ * if parsing the capability failed
+ */
+ public Capability(String capability, String sharedSecret) throws JSONException {
+
+ List cap = (List) JSONParser.parseJSON(new JSONString(capability));
+ assert (cap.size() == 6 || cap.size() == 5);
+
+ this.sharedSecret = sharedSecret;
+ this.fileId = (String) cap.get(0);
+ this.accessMode = (String) cap.get(1);
+ this.expires = (Long) cap.get(2);
+ // ignore the client identity; it cannot be used because OSDs can act as
+ // client proxies
+ this.epochNo = (Long) cap.get(4);
+ this.signature = (String) cap.get(5);
+ }
+
+ /**
+ * Creates a capability from a string representation. This constructor is
+ * meant to be used to parse and forward a received capability.
+ * It cannot be used to verify capabilities! For this purpose, please
+ * use Capability(String capability, String sharedSecret).
+ *
+ * @param capability
+ * the capability string
+ * @throws JSONException
+ * if parsing the capability failed
+ */
+ public Capability(String capability) throws JSONException {
+
+ List cap = (List) JSONParser.parseJSON(new JSONString(capability));
+ assert (cap.size() == 6);
+
+ this.sharedSecret = null;
+ this.fileId = (String) cap.get(0);
+ this.accessMode = (String) cap.get(1);
+ this.expires = (Long) cap.get(2);
+ // ignore the client identity; it cannot be used because OSDs can act as
+ // client proxies
+ this.epochNo = (Long) cap.get(4);
+ this.signature = (String) cap.get(5);
+ }
+
+ public String getFileId() {
+ return fileId;
+ }
+
+ public String getAccessMode() {
+ return accessMode;
+ }
+
+ public long getExpires() {
+ return expires;
+ }
+
+ public String getClientIdentity() {
+ return "*";
+ }
+
+ public long getEpochNo() {
+ return epochNo;
+ }
+
+ public String getSignature() {
+ return signature;
+ }
+
+ /**
+ * Checks whether the capability is valid.
+ *
+ * @return true if it has not expired yet and the signature is
+ *         valid, false otherwise
+ */
+ public boolean isValid() {
+ return !hasExpired() && hasValidSignature();
+ }
+
+ /**
+ * Checks whether the capability has expired.
+ *
+ * @return true if the current system time is past the
+ *         expiration time stamp, false otherwise
+ */
+ public boolean hasExpired() {
+ return System.currentTimeMillis() / 1000 > expires;
+ }
+
+ /**
+ * Checks whether the capability has a valid signature.
+ *
+ * @return true if the signature is valid, false
+ *         otherwise
+ */
+ public boolean hasValidSignature() {
+ return signature.equals(calcSignature());
+ }
+
+ /**
+ * Returns a string representation of the capability.
+ *
+ * @return a JSON-formatted string representing the capability.
+ */
+ public String toString() {
+ return "[\"" + fileId + "\",\"" + accessMode + "\"," + expires + ",\""
+ + getClientIdentity() + "\"," + epochNo + ",\"" + signature + "\"]";
+ }
+
+ protected String calcSignature() {
+
+ // right now, we use a shared secret between MRC and OSDs
+ // as soon as we have a Public Key Infrastructure, signatures
+ // will be generated and checked by means of asymmetric encryption
+ // techniques
+
+ String plainText = fileId + accessMode + expires + epochNo + sharedSecret;
+
+ try {
+ MessageDigest md5 = MessageDigest.getInstance("MD5");
+ md5.update(plainText.getBytes());
+ byte[] digest = md5.digest();
+
+ return OutputUtils.byteArrayToHexString(digest);
+ } catch (NoSuchAlgorithmException exc) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this, exc);
+ return null;
+ }
+ }
+
+}
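A short usage sketch for the Capability class above, assuming an MRC and an OSD that share the secret configured as capability_secret; the file ID, access mode and epoch number are made-up example values.

    import org.xtreemfs.common.Capability;
    import org.xtreemfs.foundation.json.JSONException;

    // Sketch: the MRC issues a capability, the OSD re-parses and verifies it.
    public class CapabilityRoundTrip {

        public static void main(String[] args) throws JSONException {
            String sharedSecret = "secretPassphrase";          // same value on MRC and OSDs
            Capability issued = new Capability("4711:17", "r", 1, sharedSecret);

            String wireForm = issued.toString();               // JSON array handed to the client

            // On the OSD, the received string is parsed together with the same secret ...
            Capability received = new Capability(wireForm, sharedSecret);
            // ... and accepted only if the signature matches and it has not expired.
            System.out.println("capability valid: " + received.isValid());
        }
    }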
diff --git a/servers/src/org/xtreemfs/common/ClientLease.java b/servers/src/org/xtreemfs/common/ClientLease.java
new file mode 100644
index 0000000000000000000000000000000000000000..051e60761a9124dfb7abf982934bce4968a5ce24
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/ClientLease.java
@@ -0,0 +1,241 @@
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package org.xtreemfs.common;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.json.JSONString;
+
+/**
+ *
+ * @author bjko
+ */
+public final class ClientLease implements Cloneable {
+
+ /**
+ * Default time span for the client lease validity.
+ * Must be smaller than an intra-OSD lease if replication is
+ * active!
+ */
+ public static final long LEASE_VALIDITY = 15000;
+
+ /**
+ * Indicates that a lease spans to EOF ("append lease").
+ * A lease from 0 to -1 covers the whole file, even if data is appended.
+ */
+ public static final long TO_EOF = -1;
+
+ /**
+ * first object the lease is valid for
+ */
+ private long firstObject;
+ /**
+ * last object the lease is valid for
+ */
+ private long lastObject;
+
+ /**
+ * UUID of the client owning the lease
+ */
+ private String clientId;
+
+ /**
+ * timestamp when the lease expires (in seconds since 01/01/70)
+ * must be XtreemFS global time!
+ */
+ private long expires;
+
+
+ /**
+ * fileId this lease was issued for
+ */
+ private final String fileId;
+
+ /**
+ * sequenceNo, used to generate unique leaseId = fileId+"/"+sequenceNo
+ */
+ private long sequenceNo;
+
+ /**
+ * lease type/operation
+ */
+ private String operation;
+
+ public static final String EXCLUSIVE_LEASE = "w";
+
+
+ public ClientLease(final String fileId) {
+ this.fileId = fileId;
+ }
+
+ public static ClientLease parseFromJSON(String json) throws JSONException {
+ try {
+ Map m = (Map) JSONParser.parseJSON(new JSONString(json));
+ return parseFromMap(m);
+ } catch (ClassCastException e) {
+ throw new JSONException("expected a Lease (see the XtreemFS protocol spec) object");
+ }
+ }
+
+ public static ClientLease parseFromList(List arguments) throws JSONException {
+ try {
+ final Map m = (Map)arguments.get(0);
+ return parseFromMap(m);
+ } catch (IndexOutOfBoundsException ex) {
+ throw new JSONException("expected lease object");
+ } catch (ClassCastException ex) {
+ throw new JSONException("expected lease object: "+ex);
+ }
+ }
+
+ public static ClientLease parseFromMap(Map m) throws JSONException {
+ try {
+ final String fileId = (String) m.get("fileId");
+ if (fileId == null)
+ throw new JSONException("Lease object must have a fileId field");
+ ClientLease l = new ClientLease(fileId);
+
+
+ if (!m.containsKey("clientId"))
+ throw new JSONException("Lease object must have a clientId field");
+
+ String tmp = (String) m.get("clientId");
+ l.setClientId(tmp);
+
+ tmp = (String) m.get("leaseId");
+ if (tmp == null)
+ l.setSequenceNo(0);
+ else
+ l.setSequenceNo(Long.valueOf(tmp));
+
+ Long tmp2 = (Long) m.get("firstObject");
+ if (tmp2 == null)
+ throw new JSONException("Lease object must have a firstObject field");
+ l.setFirstObject(tmp2);
+
+ tmp2 = (Long) m.get("lastObject");
+ if (tmp2 == null)
+ throw new JSONException("Lease object must have a lastObject field");
+ l.setLastObject(tmp2);
+
+ tmp2 = (Long) m.get("expires");
+ if (tmp2 == null)
+ l.setExpires(0);
+ else
+ l.setExpires(tmp2);
+
+ tmp = (String) m.get("operation");
+ if (tmp == null)
+ throw new JSONException("Lease object must have an operation field");
+ l.setOperation(tmp);
+
+
+ return l;
+ } catch (ClassCastException e) {
+ throw new JSONException("expected a Lease (see the XtreemFS protocol spec) object");
+ }
+ }
+
+ public Map encodeAsMap() throws JSONException {
+ Map m = new HashMap();
+ m.put("clientId",clientId);
+ m.put("leaseId",Long.toString(sequenceNo));
+ m.put("fileId",fileId);
+ m.put("firstObject",firstObject);
+ m.put("lastObject",lastObject);
+ m.put("expires",expires);
+ m.put("operation", operation);
+ return m;
+ }
+
+ public String encodeAsJSON() throws JSONException {
+ return JSONParser.writeJSON(encodeAsMap());
+ }
+
+ /**
+ * Checks if two leases have conflicting (i.e. overlapping) ranges
+ * @param other other lease for the same file
+ * @return true, if there is an overlap in the ranges
+ */
+ public boolean isConflicting(ClientLease other) {
+ //checks
+ if ( ((this.lastObject < other.firstObject) && (this.lastObject != TO_EOF)) ||
+ ((other.lastObject < this.firstObject) && (other.lastObject != TO_EOF)) ) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ @Override
+ public ClientLease clone() {
+ ClientLease l = new ClientLease(this.fileId);
+ l.clientId = this.clientId;
+ l.expires = this.expires;
+ l.firstObject = this.firstObject;
+ l.lastObject = this.lastObject;
+ l.operation = this.operation;
+ l.sequenceNo = this.sequenceNo;
+ return l;
+ }
+
+ public long getFirstObject() {
+ return firstObject;
+ }
+
+ public void setFirstObject(long firstObject) {
+ this.firstObject = firstObject;
+ }
+
+ public long getLastObject() {
+ return lastObject;
+ }
+
+ public void setLastObject(long lastObject) {
+ this.lastObject = lastObject;
+ }
+
+ public String getClientId() {
+ return clientId;
+ }
+
+ public void setClientId(String clientId) {
+ this.clientId = clientId;
+ }
+
+ public long getExpires() {
+ return expires;
+ }
+
+ public void setExpires(long expires) {
+ this.expires = expires;
+ }
+
+ public String getFileId() {
+ return fileId;
+ }
+
+ public long getSequenceNo() {
+ return sequenceNo;
+ }
+
+ public void setSequenceNo(long sequenceNo) {
+ this.sequenceNo = sequenceNo;
+ }
+
+ public String getOperation() {
+ return operation;
+ }
+
+ public void setOperation(String operation) {
+ this.operation = operation;
+ }
+
+
+}
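A brief usage sketch for ClientLease, showing a conflict check between two leases on the same file and a JSON round trip; the client IDs, object ranges and file ID are made-up example values.

    import org.xtreemfs.common.ClientLease;
    import org.xtreemfs.foundation.json.JSONException;

    public class ClientLeaseExample {

        public static void main(String[] args) throws JSONException {
            ClientLease first = new ClientLease("4711:17");
            first.setClientId("client-A");
            first.setFirstObject(0);
            first.setLastObject(9);
            first.setOperation(ClientLease.EXCLUSIVE_LEASE);
            first.setExpires(System.currentTimeMillis() / 1000 + 15);

            ClientLease second = new ClientLease("4711:17");
            second.setClientId("client-B");
            second.setFirstObject(10);
            second.setLastObject(ClientLease.TO_EOF);   // from object 10 to end of file
            second.setOperation(ClientLease.EXCLUSIVE_LEASE);

            // ranges [0,9] and [10,EOF] do not overlap
            System.out.println("conflicting: " + first.isConflicting(second));

            // encode to JSON and parse it back
            String json = first.encodeAsJSON();
            ClientLease copy = ClientLease.parseFromJSON(json);
            System.out.println("same client: " + copy.getClientId().equals(first.getClientId()));
        }
    }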
diff --git a/servers/src/org/xtreemfs/common/DualQueue.java b/servers/src/org/xtreemfs/common/DualQueue.java
new file mode 100644
index 0000000000000000000000000000000000000000..23227f38c0dbe815397851a1d0690197cdf120c2
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/DualQueue.java
@@ -0,0 +1,131 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common;
+
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ *
+ * @author bjko
+ */
+public final class DualQueue {
+
+ private final LinkedBlockingQueue highPriority;
+ private final LinkedBlockingQueue lowPriority;
+
+ private final AtomicInteger totalQueueLength;
+
+ private final ReentrantLock waitLock;
+
+ private final Condition notEmpty;
+
+ public DualQueue() {
+ highPriority = new LinkedBlockingQueue();
+ lowPriority = new LinkedBlockingQueue();
+ totalQueueLength = new AtomicInteger(0);
+ waitLock = new ReentrantLock();
+ notEmpty = waitLock.newCondition();
+ }
+
+ public void putHighPriority(Object item) {
+ highPriority.add(item);
+ incrementAndWakeup();
+ }
+
+ public void putLowPriority(Object item) {
+ lowPriority.add(item);
+ incrementAndWakeup();
+ }
+
+ private void incrementAndWakeup() {
+ if (totalQueueLength.incrementAndGet() == 1) {
+ try {
+ waitLock.lock();
+ notEmpty.signalAll();
+ } finally {
+ waitLock.unlock();
+ }
+ }
+ }
+
+ public Object poll() throws InterruptedException {
+
+ if (totalQueueLength.get() == 0) {
+ try {
+ waitLock.lockInterruptibly();
+ notEmpty.await();
+ } finally {
+ waitLock.unlock();
+ }
+ }
+
+ Object item = highPriority.poll();
+ if (item != null) {
+ totalQueueLength.decrementAndGet();
+ return item;
+ }
+ item = lowPriority.poll();
+ if (item != null) {
+ totalQueueLength.decrementAndGet();
+ return item;
+ }
+
+ throw new RuntimeException("totalQueueCount is incorrect (> 0) while all queues are empty!");
+ }
+
+ public Object poll(long waitTimeInMs) throws InterruptedException {
+
+ if (totalQueueLength.get() == 0) {
+ try {
+ waitLock.lockInterruptibly();
+ notEmpty.await(waitTimeInMs,TimeUnit.MILLISECONDS);
+ } finally {
+ waitLock.unlock();
+ }
+ }
+
+ if (totalQueueLength.get() == 0)
+ return null;
+
+ Object item = highPriority.poll();
+ if (item != null) {
+ totalQueueLength.decrementAndGet();
+ return item;
+ }
+ item = lowPriority.poll();
+ if (item != null) {
+ totalQueueLength.decrementAndGet();
+ return item;
+ }
+
+ throw new RuntimeException("totalQueueCount is incorrect (> 0) while all queues are empty!");
+ }
+
+
+}
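A small usage sketch for DualQueue: poll() always drains the high-priority queue before the low-priority one, regardless of insertion order; the string items are placeholders.

    import org.xtreemfs.common.DualQueue;

    public class DualQueueExample {

        public static void main(String[] args) throws InterruptedException {
            DualQueue queue = new DualQueue();

            queue.putLowPriority("low-1");
            queue.putLowPriority("low-2");
            queue.putHighPriority("high-1");

            System.out.println(queue.poll());    // high-1 (high priority overtakes)
            System.out.println(queue.poll());    // low-1
            System.out.println(queue.poll(500)); // low-2, using the bounded-wait variant
        }
    }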
diff --git a/servers/src/org/xtreemfs/common/HeartbeatThread.java b/servers/src/org/xtreemfs/common/HeartbeatThread.java
new file mode 100644
index 0000000000000000000000000000000000000000..c8840152094d2a236c896f53df54921fc758015e
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/HeartbeatThread.java
@@ -0,0 +1,301 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin,
+ Barcelona Supercomputing Center - Centro Nacional de Supercomputacion
+ and Consiglio Nazionale delle Ricerche.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
+ * Eugenio Cesario (CNR)
+ */
+
+package org.xtreemfs.common;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.xtreemfs.common.clients.HttpErrorException;
+import org.xtreemfs.common.clients.RPCClient;
+import org.xtreemfs.common.clients.RPCResponse;
+import org.xtreemfs.common.clients.dir.DIRClient;
+import org.xtreemfs.common.config.ServiceConfig;
+import org.xtreemfs.common.logging.Logging;
+import org.xtreemfs.common.util.NetUtils;
+import org.xtreemfs.common.util.OutputUtils;
+import org.xtreemfs.common.uuids.ServiceUUID;
+import org.xtreemfs.foundation.LifeCycleThread;
+import org.xtreemfs.foundation.json.JSONException;
+
+/**
+ * A thread that regularly sends a heartbeat signal with fresh service data to
+ * the Directory Service.
+ */
+public class HeartbeatThread extends LifeCycleThread {
+
+ /**
+ * An interface that generates service data to be sent to the Directory
+ * Service. Each time a heartbeat signal is sent, new service data will be
+ * generated by means of invoking getServiceData() .
+ */
+ public interface ServiceDataGenerator {
+ public Map<String, Map<String, Object>> getServiceData();
+ }
+
+ private static final long UPDATE_INTERVAL = 60 * 1000; // 60s
+
+ private ServiceUUID uuid;
+
+ private ServiceDataGenerator serviceDataGen;
+
+ private DIRClient client;
+
+ private String authString;
+
+ private volatile boolean quit;
+
+ private final ServiceConfig config;
+
+ public HeartbeatThread(String name, DIRClient client, ServiceUUID uuid,
+ ServiceDataGenerator serviceDataGen, String authString, ServiceConfig config) {
+
+ super(name);
+
+ this.client = client;
+ this.uuid = uuid;
+ this.serviceDataGen = serviceDataGen;
+ this.authString = authString;
+ this.config = config;
+ }
+
+ public void shutdown() {
+ this.quit = true;
+ this.interrupt();
+ try {
+ if (client.getSpeedy().isAlive()) {
+ RPCResponse r = client.deregisterEntity(uuid.toString(), authString);
+ r.waitForResponse(2000);
+ Logging.logMessage(Logging.LEVEL_INFO, this, uuid + " deregistered");
+ }
+ } catch (Exception ex) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this, "cannot deregister at DIR: " + ex);
+ }
+ }
+
+ public void run() {
+
+ List<RPCResponse> responses = new LinkedList<RPCResponse>();
+ Map<String, Long> verMap = new HashMap<String, Long>();
+
+ // initially, ...
+ try {
+
+ // ... for each UUID, ...
+ for (Entry<String, Map<String, Object>> mapEntry : serviceDataGen.getServiceData()
+ .entrySet()) {
+
+ // ... remove old DS entry if necessary
+ if (!"volume".equals(mapEntry.getValue().get("type"))) {
+ RPCResponse r = client.deregisterEntity(mapEntry.getKey(), authString);
+ r.waitForResponse();
+ responses.add(r);
+ }
+
+ // ... register the entity
+ registerEntity(mapEntry.getKey(), mapEntry.getValue(), verMap, authString,
+ responses);
+ if (Logging.isDebug())
+ Logging.logMessage(Logging.LEVEL_DEBUG, this, uuid
+ + " successfully registered at Directory Service");
+ }
+
+ // ... register the address mapping for the service
+
+ List<Map<String, Object>> endpoints = null;
+
+ //check if a listen.address is set
+ if (config.getAddress() == null) {
+ endpoints = NetUtils.getReachableEndpoints(uuid.getAddress()
+ .getPort(), uuid.getProtocol());
+ } else {
+ //if it is set, we should use that for UUID mapping!
+ endpoints = new ArrayList(1);
+ Map m = RPCClient.generateMap("address", config.getAddress().toString(),
+ "port", uuid.getAddress().getPort(), "protocol", uuid.getProtocol(),
+ "ttl", 3600, "match_network", "*");
+ endpoints.add(m);
+ }
+
+ // fetch the latest address mapping version from the Directory
+ // Service
+ long version = 0;
+ RPCResponse r2 = client.getAddressMapping(uuid.toString(), authString);
+ try {
+ Map<String, List<Object>> result = (Map<String, List<Object>>) r2.get();
+
+ // retrieve the version number from the address mapping
+ Collection<Entry<String, List<Object>>> entries = result.entrySet();
+ if (entries.size() != 0) {
+ List valueList = entries.iterator().next().getValue();
+ version = (Long) valueList.get(0);
+ }
+ } finally {
+ responses.add(r2);
+ }
+
+ // register/update the current address mapping
+ RPCResponse r3 = client.registerAddressMapping(uuid.toString(), endpoints, version,
+ authString);
+ try {
+ r3.waitForResponse();
+ } finally {
+ responses.add(r3);
+ }
+
+ } catch (Exception ex) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this,
+ "an error occurred while initially contacting the Directory Service: "
+ + OutputUtils.stackTraceToString(ex));
+ notifyCrashed(ex);
+ } finally {
+ for (RPCResponse resp : responses)
+ resp.freeBuffers();
+ }
+
+ notifyStarted();
+
+ // periodically, ...
+ while (!quit) {
+
+ responses.clear();
+
+ try {
+
+ // ... for each UUID, ...
+ for (Entry<String, Map<String, Object>> mapEntry : serviceDataGen.getServiceData()
+ .entrySet()) {
+
+ // ... update the Directory Service entry for the service
+ registerEntity(mapEntry.getKey(), mapEntry.getValue(), verMap, authString,
+ responses);
+ if (Logging.isDebug())
+ Logging.logMessage(Logging.LEVEL_DEBUG, this, uuid
+ + " successfully updated at Directory Service");
+ }
+
+ } catch (IOException ex) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this, ex);
+ } catch (JSONException ex) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this, ex);
+ } catch (InterruptedException ex) {
+ quit = true;
+ break;
+ } finally {
+ for (RPCResponse resp : responses)
+ resp.freeBuffers();
+ }
+
+ if (quit)
+ break;
+
+ try {
+ Thread.sleep(UPDATE_INTERVAL);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+
+ notifyStopped();
+ }
+
+ private void registerEntity(String uuid, Map<String, Object> data,
+ Map<String, Long> versionMap, String authString, List<RPCResponse> responses)
+ throws HttpErrorException, InterruptedException, IOException, JSONException {
+
+ Long ver = versionMap.get(uuid);
+ if (ver == null)
+ ver = 0L;
+
+ // in case the data object refers to a volume, check whether a volume
+ // with the same name exists already
+ if ("volume".equals(data.get("type"))) {
+
+ RPCResponse<Map<String, Map<String, Object>>> response = client.getEntities(RPCClient
+ .generateMap("name", data.get("name")),
+ RPCClient.generateStringList("version"), authString);
+
+ try {
+ Map<String, Map<String, Object>> entities = response.get();
+
+ if (!entities.isEmpty()) {
+ String id = entities.keySet().iterator().next();
+
+ if (id != null && !id.equals(uuid)) {
+ Logging.logMessage(Logging.LEVEL_WARN, this,
+ "WARNING: could not register volume '" + data.get("name")
+ + "', as it has already been registered with a different UUID: "
+ + id + "!");
+ return;
+
+ } else
+ ver = Long.parseLong((String) entities.get(id).get("version"));
+ }
+ } finally {
+ responses.add(response);
+ }
+ }
+
+ RPCResponse<Long> response = client.registerEntity(uuid, data, ver, authString);
+ try {
+ versionMap.put(uuid, response.get());
+ } catch (HttpErrorException exc) {
+
+ // if the version number was outdated for some reason, fetch the
+ // latest version of the entity
+ if (exc.getStatusCode() == 420) {
+
+ RPCResponse<Map<String, Map<String, Object>>> r = client.getEntities(RPCClient
+ .generateMap("uuid", uuid), RPCClient.generateStringList("version"),
+ authString);
+ try {
+ Map<String, Map<String, Object>> result = r.get();
+
+ // retrieve the version number
+ Collection<Entry<String, Map<String, Object>>> entries = result.entrySet();
+ if (entries.size() != 0) {
+ Map valueMap = entries.iterator().next().getValue();
+ versionMap.put(uuid, Long.valueOf((String) valueMap.get("version")));
+ }
+
+ } finally {
+ responses.add(r);
+ }
+ }
+
+ throw exc;
+ } finally {
+ responses.add(response);
+ }
+ }
+}
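A sketch of a ServiceDataGenerator implementation as expected by the HeartbeatThread above; the UUID key and the reported fields other than "type" are made-up example values.

    import java.util.HashMap;
    import java.util.Map;

    import org.xtreemfs.common.HeartbeatThread.ServiceDataGenerator;

    // Returns freshly generated service data each time a heartbeat is sent.
    public class ExampleServiceDataGenerator implements ServiceDataGenerator {

        public Map<String, Map<String, Object>> getServiceData() {
            Map<String, Object> osdData = new HashMap<String, Object>();
            osdData.put("type", "OSD");                                // entity type
            osdData.put("free", Long.valueOf(1024L * 1024L * 1024L));  // sampled per heartbeat
            osdData.put("load", Integer.valueOf(0));

            Map<String, Map<String, Object>> services = new HashMap<String, Map<String, Object>>();
            services.put("http://localhost:32640", osdData);           // keyed by the service UUID
            return services;
        }
    }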
diff --git a/servers/src/org/xtreemfs/common/LRUCache.java b/servers/src/org/xtreemfs/common/LRUCache.java
new file mode 100644
index 0000000000000000000000000000000000000000..47a5404aca07f5858f9b8f3c20c8b3ba194db8b7
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/LRUCache.java
@@ -0,0 +1,49 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and
+ Barcelona Supercomputing Center - Centro Nacional de Supercomputacion.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
+ */
+
+package org.xtreemfs.common;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * This class implements an LRU cache
+ *
+ * @author jmalo
+ */
+public class LRUCache<K, V> extends LinkedHashMap<K, V> {
+ private int maximumSize;
+
+ /** Creates a new instance of LRUCache */
+ public LRUCache(int size) {
+ super(size, (float)0.75, true);
+
+ maximumSize = size;
+ }
+
+ protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
+ return size() > maximumSize;
+ }
+}
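A usage sketch for LRUCache showing the eviction behaviour implied by removeEldestEntry; the keys and values are arbitrary.

    import org.xtreemfs.common.LRUCache;

    public class LRUCacheExample {

        public static void main(String[] args) {
            LRUCache<String, Long> cache = new LRUCache<String, Long>(2);
            cache.put("a", Long.valueOf(1));
            cache.put("b", Long.valueOf(2));
            cache.get("a");                  // touch "a", so "b" becomes the eldest entry
            cache.put("c", Long.valueOf(3)); // exceeds the maximum size, "b" is evicted

            System.out.println(cache.containsKey("b")); // false
            System.out.println(cache.keySet());         // [a, c] in access order
        }
    }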
diff --git a/servers/src/org/xtreemfs/common/PrioritizableMessage.java b/servers/src/org/xtreemfs/common/PrioritizableMessage.java
new file mode 100644
index 0000000000000000000000000000000000000000..5e55901eb41b255d310abddaa954236b6a5b9d04
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/PrioritizableMessage.java
@@ -0,0 +1,36 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and
+ Barcelona Supercomputing Center - Centro Nacional de Supercomputacion.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
+ */
+
+package org.xtreemfs.common;
+
+/**
+ *
+ * @author bjko
+ */
+public interface PrioritizableMessage extends Comparable {
+
+ public int getMessagePriority();
+
+}
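An illustrative implementation of the interface above; the class name and the convention that lower numbers mean higher priority are assumptions made for the example, not an actual XtreemFS message type.

    import org.xtreemfs.common.PrioritizableMessage;

    public class SimpleMessage implements PrioritizableMessage {

        private final int priority;

        public SimpleMessage(int priority) {
            this.priority = priority;
        }

        public int getMessagePriority() {
            return priority;
        }

        // orders messages by ascending priority value
        public int compareTo(Object other) {
            int otherPriority = ((PrioritizableMessage) other).getMessagePriority();
            return priority < otherPriority ? -1 : (priority == otherPriority ? 0 : 1);
        }
    }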
diff --git a/servers/src/org/xtreemfs/common/Request.java b/servers/src/org/xtreemfs/common/Request.java
new file mode 100644
index 0000000000000000000000000000000000000000..2cd8e23d05528eebddf3e322b43d7bd396f1a059
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/Request.java
@@ -0,0 +1,103 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB), Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+package org.xtreemfs.common;
+
+import org.xtreemfs.foundation.pinky.PinkyRequest;
+import org.xtreemfs.osd.ErrorRecord;
+
+/**
+ *
+ * 29.09.2008
+ *
+ * @author clorenz
+ */
+public abstract class Request {
+
+ /**
+ * The HTTP request object.
+ */
+ private PinkyRequest pinkyRequest;
+
+ /**
+ * request id used for tracking.
+ */
+ protected long requestId;
+
+ /**
+ * error record, if an error occurred
+ */
+ protected ErrorRecord error;
+
+ private Object attachment;
+
+ private long enqueueNanos, finishNanos;
+
+ protected Request(PinkyRequest pr) {
+ this.setPinkyRequest(pr);
+ }
+
+ public long getRequestId() {
+ return requestId;
+ }
+
+ public ErrorRecord getError() {
+ return error;
+ }
+
+ public void setError(ErrorRecord error) {
+ this.error = error;
+ }
+
+ public PinkyRequest getPinkyRequest() {
+ return pinkyRequest;
+ }
+
+ public void setPinkyRequest(PinkyRequest pinkyRequest) {
+ this.pinkyRequest = pinkyRequest;
+ }
+
+ public Object getAttachment() {
+ return attachment;
+ }
+
+ public void setAttachment(Object attachment) {
+ this.attachment = attachment;
+ }
+
+ public long getEnqueueNanos() {
+ return enqueueNanos;
+ }
+
+ public void setEnqueueNanos(long enqueueNanos) {
+ this.enqueueNanos = enqueueNanos;
+ }
+
+ public long getFinishNanos() {
+ return finishNanos;
+ }
+
+ public void setFinishNanos(long finishNanos) {
+ this.finishNanos = finishNanos;
+ }
+}
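A sketch of a concrete Request subclass together with the intended use of the enqueue/finish timestamps; "PingRequest" and the helper method are made-up names, not actual XtreemFS request types.

    import org.xtreemfs.common.Request;
    import org.xtreemfs.foundation.pinky.PinkyRequest;

    public class PingRequest extends Request {

        public PingRequest(PinkyRequest pr) {
            super(pr);
        }

        /** Processing time in microseconds, derived from the enqueue/finish nano timestamps. */
        public long processingTimeUs() {
            return (getFinishNanos() - getEnqueueNanos()) / 1000;
        }
    }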
diff --git a/servers/src/org/xtreemfs/common/RingBuffer.java b/servers/src/org/xtreemfs/common/RingBuffer.java
new file mode 100644
index 0000000000000000000000000000000000000000..bb99a2dfc334051c90e14de3125726b6e9a6c32a
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/RingBuffer.java
@@ -0,0 +1,87 @@
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package org.xtreemfs.common;
+
+import java.util.Iterator;
+
+/**
+ *
+ * @author bjko
+ */
+public class RingBuffer<T> implements Iterable<T> {
+
+ protected T[] items;
+ protected int pointer;
+ protected int start;
+
+ public RingBuffer(int capacity) {
+ items = (T[]) new Object[capacity];
+ pointer = 0;
+ start = 0;
+ }
+
+ public RingBuffer(int capacity, T initialValue) {
+ this(capacity);
+ for (int i = 0; i < capacity; i++)
+ items[i] = initialValue;
+ }
+
+ public void insert(T item) {
+ final T tmp = items[pointer];
+ if (tmp != null) {
+ //overwriting
+ start++;
+ if (start == items.length)
+ start = 0;
+ }
+ items[pointer++] = item;
+ if (pointer == items.length)
+ pointer = 0;
+ }
+
+ private class RingBufferIterator implements Iterator<T> {
+
+ private int position;
+
+ public RingBufferIterator() {
+ position = 0;
+ }
+
+ public boolean hasNext() {
+ if (position >= items.length)
+ return false;
+ return items[ (position+start) % items.length] != null;
+ }
+
+ public T next() {
+ return (T) items[ ((position++)+start) % items.length];
+ }
+
+ public void remove() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
+
+ }
+
+ public Iterator<T> iterator() {
+ return new RingBufferIterator();
+ }
+
+ public String toString() {
+ StringBuilder contents = new StringBuilder();
+ contents.append("[ ");
+ for (int i = 0; i < items.length; i++) {
+ T item = items[ (i+start) % items.length];
+ if (item == null)
+ break;
+ contents.append(item);
+ contents.append(", ");
+ }
+ contents.append("] ");
+ return contents.toString();
+ }
+
+}
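A minimal usage sketch for the RingBuffer above, using only the class defined in this file: once the capacity is exceeded, the oldest entry is overwritten and iteration starts at the oldest surviving element.

    import org.xtreemfs.common.RingBuffer;

    public class RingBufferExample {
        public static void main(String[] args) {
            RingBuffer<String> log = new RingBuffer<String>(3);
            log.insert("a");
            log.insert("b");
            log.insert("c");
            log.insert("d"); // overwrites "a", start moves on to "b"

            // prints b, c, d
            for (String entry : log)
                System.out.println(entry);
        }
    }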
diff --git a/servers/src/org/xtreemfs/common/TimeSync.java b/servers/src/org/xtreemfs/common/TimeSync.java
new file mode 100644
index 0000000000000000000000000000000000000000..778c92355cb53fcc704738782f103dbb56d77945
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/TimeSync.java
@@ -0,0 +1,250 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and
+ Barcelona Supercomputing Center - Centro Nacional de Supercomputacion.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
+ */
+
+package org.xtreemfs.common;
+
+import java.net.InetSocketAddress;
+
+import org.xtreemfs.common.auth.NullAuthProvider;
+import org.xtreemfs.common.clients.RPCResponse;
+import org.xtreemfs.common.clients.dir.DIRClient;
+import org.xtreemfs.common.logging.Logging;
+
+/**
+ * A class that offers a local time w/ adjustable granularity and a global time
+ * based on the time reported by the DIR. Global time is adjusted periodically.
+ * This class should be used to minimize the number of calls to
+ * System.currentTimeMillis, which is a costly system call on Linux. Moreover,
+ * it offers a system-global time.
+ *
+ * @author bjko
+ */
+public final class TimeSync extends Thread {
+
+ /**
+ * A dir client used to synchronize clocks
+ */
+ private final DIRClient dir;
+
+ /**
+ * interval in ms to wait between two synchronizations.
+ */
+ private final int timeSyncInterval;
+
+ /**
+ * interval between updates of the local system clock.
+ */
+ private final int localTimeRenew;
+
+ /**
+ * local sys time as of last update
+ */
+ private volatile long localSysTime;
+
+ /**
+ * drift between local clock and global time as of last resync() operation.
+ */
+ private volatile long currentDrift;
+
+ /**
+ * set to true to stop thread
+ */
+ private volatile boolean quit;
+
+ /**
+ * timestamp of last resync operation
+ */
+ private long lastSync;
+
+ /**
+ * authentication string sent to the DIR
+ */
+ private final String authStr;
+
+ private static TimeSync theInstance;
+
+ /** Creates a new instance of TimeSync.
+ @param dir a directory server to use for synchronizing clocks; can be null for test setups only
+ */
+ private TimeSync(DIRClient dir, int timeSyncInterval, int localTimeRenew,
+ String dirAuthStr) {
+ super("TimeSync Thread");
+ setDaemon(true);
+ this.localTimeRenew = localTimeRenew;
+ this.timeSyncInterval = timeSyncInterval;
+ this.dir = dir;
+ this.authStr = dirAuthStr;
+ TimeSync.theInstance = this;
+ }
+
+ /**
+ * main loop
+ */
+ @Override
+ public void run() {
+ while (!quit) {
+ localSysTime = System.currentTimeMillis();
+ if (localSysTime - lastSync > timeSyncInterval) {
+ resync();
+ }
+ try {
+ TimeSync.sleep(localTimeRenew);
+ } catch (InterruptedException ex) {
+ }
+
+ }
+ theInstance = null;
+ }
+
+ /**
+ * Initializes the time synchronizer. Note that only the first invocation of
+ * this method has an effect, any further invocations will be ignored.
+ *
+ * @param dir
+ * @param timeSyncInterval
+ * @param localTimeRenew
+ * @param dirAuthStr
+ */
+ public static void initialize(DIRClient dir, int timeSyncInterval,
+ int localTimeRenew, String dirAuthStr) {
+
+ if (theInstance != null)
+ return;
+
+ TimeSync s = new TimeSync(dir, timeSyncInterval, localTimeRenew,
+ dirAuthStr);
+ s.start();
+ }
+
+ public static void close() {
+ if (theInstance == null)
+ return;
+ theInstance.shutdown();
+ }
+
+ /**
+ * stop the thread
+ */
+ public void shutdown() {
+ quit = true;
+ this.interrupt();
+ }
+
+ /**
+ * returns the current value of the local system time variable. Has a
+ * resolution of localTimeRenew ms.
+ */
+ public static long getLocalSystemTime() {
+ return getInstance().localSysTime;
+ }
+
+ /**
+ * returns the current value of the local system time adjusted to global
+ * time. Has a resolution of localTimeRenew ms.
+ */
+ public static long getGlobalTime() {
+ return getInstance().localSysTime + getInstance().currentDrift;
+ }
+
+ public static long getLocalRenewInterval() {
+ return getInstance().localTimeRenew;
+ }
+
+ public static int getTimeSyncInterval() {
+ return getInstance().timeSyncInterval;
+ }
+
+ /**
+ * returns the current clock drift.
+ */
+ public long getDrift() {
+ return this.currentDrift;
+ }
+
+ /**
+ * resynchronizes with the global time obtained from the DIR
+ */
+ private void resync() {
+ if (dir == null)
+ return;
+ try {
+ long tStart = localSysTime;
+
+ long oldDrift = currentDrift;
+ RPCResponse<Long> r = dir.getGlobalTime(authStr);
+ Long globalTime = r.get();
+ r.freeBuffers();
+ long tEnd = System.currentTimeMillis();
+ // add half a roundtrip to estimate the delay
+ globalTime += (tEnd - tStart) / 2;
+
+ currentDrift = globalTime - tEnd;
+ lastSync = tEnd;
+
+ if (Math.abs(oldDrift - currentDrift) > 5000) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this,
+ "STRANGE DRIFT CHANGE from " + oldDrift + " to "
+ + currentDrift);
+ }
+
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ lastSync = System.currentTimeMillis();
+ }
+ }
+
+ public static TimeSync getInstance() {
+ if (theInstance == null)
+ throw new RuntimeException("TimeSync not initialized!");
+ return theInstance;
+ }
+
+ /**
+ * Simple demonstration routine
+ */
+ public static void main(String[] args) {
+ try {
+ // simple test
+ Logging.start(Logging.LEVEL_INFO);
+
+ DIRClient dir = new DIRClient(null, new InetSocketAddress(
+ "xtreem.zib.de", 32638));
+ TimeSync ts = new TimeSync(dir, 1000, 50, NullAuthProvider.createAuthString("me", "me"));
+ ts.start();
+
+ for (;;) {
+ Logging.logMessage(Logging.LEVEL_INFO, null, "local time = "
+ + ts.getLocalSystemTime());
+ Logging.logMessage(Logging.LEVEL_INFO, null, "global time = "
+ + ts.getGlobalTime() + " +" + ts.getDrift());
+ Thread.sleep(1000);
+ }
+
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ }
+ }
+
+}
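A sketch of running TimeSync without a DIR, which the constructor comment explicitly allows for test setups; the interval values and the null auth string are arbitrary choices for this illustration (the auth string is never used when no DIRClient is given).

    import org.xtreemfs.common.TimeSync;

    public class TimeSyncLocalExample {
        public static void main(String[] args) throws InterruptedException {
            // no DIR: the drift stays 0, so global time equals local time
            TimeSync.initialize(null, 60000, 50, null);
            Thread.sleep(100); // give the sync thread time to set the local time

            System.out.println("local  = " + TimeSync.getLocalSystemTime());
            System.out.println("global = " + TimeSync.getGlobalTime());

            TimeSync.close();
        }
    }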
diff --git a/servers/src/org/xtreemfs/common/VersionManagement.java b/servers/src/org/xtreemfs/common/VersionManagement.java
new file mode 100644
index 0000000000000000000000000000000000000000..fc2c1a2d929736eee48b194f2dd0c62e58c6e6a4
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/VersionManagement.java
@@ -0,0 +1,90 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and
+ Barcelona Supercomputing Center - Centro Nacional de Supercomputacion.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
+ */
+
+package org.xtreemfs.common;
+
+import java.util.List;
+
+/**
+ * This class is meant to maintain version numbers for different components used
+ * in XtreemFS, in order to be able to detect possible incompatibilities between
+ * different versions.
+ *
+ * When a new version of the protocol, database, etc. has been implemented, the
+ * corresponding version number should be replaced. XtreemFS will rely on this
+ * class to find out what the current version numbers are.
+ *
+ */
+public class VersionManagement {
+
+ private static final long[] supportedProtocolVersions = { 39 };
+
+ private static final long mrcDataVersion = 2;
+
+ private static final long osdDataVersion = 1;
+
+ public static long getMatchingProtVers(List<Long> proposedVersions) {
+
+ int i = 0;
+ int j = 0;
+ long result = -1;
+
+ // find the largest element contained in both lists
+ if (proposedVersions.size() > 0) {
+
+ while (i < proposedVersions.size() && j < supportedProtocolVersions.length) {
+ long diff = proposedVersions.get(i) - supportedProtocolVersions[j];
+
+ if (diff == 0) {
+ result = supportedProtocolVersions[j];
+ break;
+ } else if (diff > 0)
+ i++;
+ else
+ j++;
+ }
+ }
+
+ return result;
+ }
+
+ public static String getSupportedProtVersAsString() {
+ String str = "[";
+ for (int v = 0; v < supportedProtocolVersions.length; v++)
+ str += supportedProtocolVersions[v]
+ + (v == supportedProtocolVersions.length - 1 ? "]" : ", ");
+
+ return str;
+ }
+
+ public static long getMrcDataVersion() {
+ return mrcDataVersion;
+ }
+
+ public static long getOsdDataVersion() {
+ return osdDataVersion;
+ }
+
+}
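A short illustration of getMatchingProtVers: the merge-style walk advances the index on the side holding the larger value, so both the proposed list and supportedProtocolVersions must be sorted in descending order; with the single supported version 39 above, a match is found only if the client proposes 39.

    import java.util.Arrays;
    import org.xtreemfs.common.VersionManagement;

    public class ProtocolVersionExample {
        public static void main(String[] args) {
            // proposed versions in descending order
            long match = VersionManagement.getMatchingProtVers(Arrays.asList(41L, 39L, 37L));
            System.out.println(match);   // 39

            long none = VersionManagement.getMatchingProtVers(Arrays.asList(38L, 37L));
            System.out.println(none);    // -1, no common version
        }
    }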
diff --git a/servers/src/org/xtreemfs/common/auth/AuthenticationException.java b/servers/src/org/xtreemfs/common/auth/AuthenticationException.java
new file mode 100644
index 0000000000000000000000000000000000000000..58f6afed3eb8c9665fdcc133d0cc2bc6dacac533
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/auth/AuthenticationException.java
@@ -0,0 +1,40 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * AUTHORS: Bjoern Kolbeck (ZIB)
+ */
+package org.xtreemfs.common.auth;
+
+/**
+ * Thrown by an authentication provider when authentication is not
+ * possible for any reason.
+ * @author bjko
+ */
+public class AuthenticationException extends Exception {
+
+ /** creates a new exception.
+ *
+ * @param msg an error message that should be meaningful to users!
+ */
+ public AuthenticationException(String msg) {
+ super(msg);
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/auth/AuthenticationProvider.java b/servers/src/org/xtreemfs/common/auth/AuthenticationProvider.java
new file mode 100644
index 0000000000000000000000000000000000000000..0744a4a5c2a96a9e0eb4f0252e33b3f07800f6c1
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/auth/AuthenticationProvider.java
@@ -0,0 +1,52 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * AUTHORS: Bjoern Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common.auth;
+
+import org.xtreemfs.foundation.pinky.channels.ChannelIO;
+
+/**
+ * Authentication Providers extract the credentials (UID/GIDs/SuperUser)
+ * from the authentication header and the certificates.
+ * @author bjko
+ */
+public interface AuthenticationProvider {
+
+ /** initializes the provider class
+ *
+ * @param useSSL true, if SSL is enabled.
+ * @throws java.lang.RuntimeException if the provider cannot be initialized.
+ */
+ void initialize(boolean useSSL) throws RuntimeException;
+
+ /**
+ * Get the effective credentials for an operation.
+ * @param authHeader content of the Authentication header sent by the client
+ * @param channel the channel used, can be used to store attachments and to get certificates
+ * @return the effective user credentials
+ * @throws org.xtreemfs.common.auth.AuthenticationException if authentication is not possible
+ */
+ UserCredentials getEffectiveCredentials(String authHeader, ChannelIO channel) throws AuthenticationException;
+
+}
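A sketch of what a custom provider has to implement; the class name and the fixed user/group IDs are made up for illustration, and only the interface above plus the UserCredentials class from this changeset are assumed.

    package org.xtreemfs.common.auth;

    import java.util.Collections;

    import org.xtreemfs.foundation.pinky.channels.ChannelIO;

    /** Illustrative provider that maps every client to one fixed, non-superuser identity. */
    public class StaticAuthProvider implements AuthenticationProvider {

        public void initialize(boolean useSSL) throws RuntimeException {
            // nothing to set up
        }

        public UserCredentials getEffectiveCredentials(String authHeader, ChannelIO channel)
            throws AuthenticationException {
            // ignore the header and the channel, always return the same identity
            return new UserCredentials("someUser", Collections.singletonList("someGroup"), false);
        }
    }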
diff --git a/servers/src/org/xtreemfs/common/auth/BackwdCompatNullAuthProvider.java b/servers/src/org/xtreemfs/common/auth/BackwdCompatNullAuthProvider.java
new file mode 100644
index 0000000000000000000000000000000000000000..14b4c60be77acf3b14e939b04706350b43612fa2
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/auth/BackwdCompatNullAuthProvider.java
@@ -0,0 +1,106 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+Grid Operating System, see <http://www.xtreemos.eu> for more details.
+The XtreemOS project has been developed with the financial support of the
+European Commission's IST program under contract #FP6-033576.
+XtreemFS is free software: you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation, either version 2 of the License, or (at your option)
+any later version.
+XtreemFS is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+You should have received a copy of the GNU General Public License
+along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * AUTHORS: Bjoern Kolbeck (ZIB)
+ */
+package org.xtreemfs.common.auth;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.StringTokenizer;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.json.JSONString;
+import org.xtreemfs.foundation.pinky.channels.ChannelIO;
+import org.xtreemfs.common.auth.AuthenticationException;
+import org.xtreemfs.common.auth.AuthenticationProvider;
+import org.xtreemfs.common.auth.UserCredentials;
+
+/**
+ * A simple provider that parses the JSON string sent in the authentication header
+ * as described in the protocol spec.
+ * @author bjko
+ */
+public class BackwdCompatNullAuthProvider implements AuthenticationProvider {
+
+ public BackwdCompatNullAuthProvider() {
+
+ }
+
+ public UserCredentials getEffectiveCredentials(String authHeader, ChannelIO channel)
+ throws AuthenticationException {
+
+ if (authHeader.startsWith("{")) {
+ //new JSON header format
+
+ String GUID = null;
+ List<String> GGIDs = null;
+ String mech = null;
+ try {
+ //parse the JSON string in header field
+ JSONString authStr = new JSONString(authHeader);
+ Map<String, Object> authInfo = (Map<String, Object>) JSONParser.parseJSON(authStr);
+ mech = (String) authInfo.get("mechanism");
+ GUID = (String) authInfo.get("guid");
+ GGIDs = (List<String>) authInfo.get("ggids");
+ } catch (Exception ex) {
+ throw new AuthenticationException("malformed authentication credentials: " + ex);
+ }
+
+ if (!mech.equals("nullauth")) {
+ throw new AuthenticationException("unknown authorization mechanism: " + mech);
+ }
+
+ return new UserCredentials(GUID, GGIDs, GUID.equals("root"));
+ } else {
+ String GUID = null;
+ List<String> GGIDs = null;
+ //old header format, kept for compatibility!
+ StringTokenizer st = new StringTokenizer(
+ authHeader, " ");
+ String mech = st.nextToken();
+
+ if (mech.equals("nullauth")) {
+
+ if (!st.hasMoreTokens()) {
+ throw new AuthenticationException("nullauth: user ID required");
+ }
+
+ // set the user ID
+ GUID = st.nextToken();
+
+ if (!st.hasMoreTokens()) {
+ throw new AuthenticationException("nullauth: at least one group ID required");
+ }
+
+ // set the group IDs
+ GGIDs = new ArrayList<String>();
+ while (st.hasMoreTokens()) {
+ GGIDs.add(st.nextToken());
+ }
+
+ return new UserCredentials(GUID, GGIDs, GUID.equals("root"));
+ } else {
+ throw new AuthenticationException("unknown authorization mechanism: " + mech);
+ }
+
+ }
+ }
+
+ public void initialize(boolean useSSL) throws RuntimeException {
+ }
+}
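The two header formats this provider accepts, side by side; the sketch calls getEffectiveCredentials directly with a null channel, which this implementation never touches.

    import org.xtreemfs.common.auth.BackwdCompatNullAuthProvider;
    import org.xtreemfs.common.auth.UserCredentials;

    public class AuthHeaderExample {
        public static void main(String[] args) throws Exception {
            BackwdCompatNullAuthProvider auth = new BackwdCompatNullAuthProvider();
            auth.initialize(false);

            // new JSON header format
            UserCredentials c1 = auth.getEffectiveCredentials(
                "{\"mechanism\":\"nullauth\",\"guid\":\"user1\",\"ggids\":[\"users\"]}", null);

            // old space-separated format, kept for compatibility
            UserCredentials c2 = auth.getEffectiveCredentials("nullauth user1 users", null);

            System.out.println(c1.getUserID() + " / " + c2.getUserID());
        }
    }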
diff --git a/servers/src/org/xtreemfs/common/auth/NullAuthProvider.java b/servers/src/org/xtreemfs/common/auth/NullAuthProvider.java
new file mode 100644
index 0000000000000000000000000000000000000000..daf50f3faef162cd928a834e3c12e594f50c497b
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/auth/NullAuthProvider.java
@@ -0,0 +1,112 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * AUTHORS: Bjoern Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common.auth;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.json.JSONString;
+import org.xtreemfs.foundation.pinky.channels.ChannelIO;
+
+/**
+ * A simple provider that parses the JSON string sent in the authentication
+ * header as described in the protocol spec.
+ *
+ * @author bjko
+ */
+public class NullAuthProvider implements AuthenticationProvider {
+
+ public NullAuthProvider() {
+
+ }
+
+ public UserCredentials getEffectiveCredentials(String authHeader, ChannelIO channel)
+ throws AuthenticationException {
+ String GUID = null;
+ List<String> GGIDs = null;
+ String mech = null;
+ try {
+ // parse the JSON string in header field
+ JSONString authStr = new JSONString(authHeader);
+ Map<String, Object> authInfo = (Map<String, Object>) JSONParser.parseJSON(authStr);
+ mech = (String) authInfo.get("mechanism");
+ GUID = (String) authInfo.get("guid");
+ GGIDs = (List<String>) authInfo.get("ggids");
+ } catch (Exception ex) {
+ throw new AuthenticationException("malformed authentication credentials: " + ex);
+ }
+
+ if (!mech.equals("nullauth"))
+ throw new AuthenticationException("unknown authorization mechanism: " + mech);
+
+ return new UserCredentials(GUID, GGIDs, GUID.equals("root"));
+
+ }
+
+ public void initialize(boolean useSSL) throws RuntimeException {
+ }
+
+ /**
+ * Generates a NullAuthProvider-specific authorization string.
+ *
+ * @param guid
+ * the global user ID
+ * @param ggid
+ * a list of global group IDs
+ * @return an authorization string
+ * @throws JSONException
+ */
+ public static String createAuthString(String guid, List<String> ggid) throws JSONException {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{\"mechanism\":");
+ sb.append(JSONParser.writeJSON("nullauth"));
+ sb.append(",\"guid\":");
+ sb.append(JSONParser.writeJSON(guid));
+ sb.append(",\"ggids\":");
+ sb.append(JSONParser.writeJSON(ggid));
+ sb.append("}");
+ return sb.toString();
+ }
+
+ /**
+ * Generates a NullAuthProvider-specific authorization string.
+ *
+ * @param guid
+ * the global user ID
+ * @param ggid
+ * the global group ID
+ * @return an authorization string
+ * @throws JSONException
+ */
+ public static String createAuthString(String guid, String ggid) throws JSONException {
+ ArrayList<String> ggids = new ArrayList<String>(1);
+ ggids.add(ggid);
+ return createAuthString(guid, ggids);
+ }
+
+}
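A round trip through createAuthString and getEffectiveCredentials, which is essentially what client and server do with this header; the channel argument is unused by this provider, so null is acceptable in a sketch.

    import org.xtreemfs.common.auth.NullAuthProvider;
    import org.xtreemfs.common.auth.UserCredentials;

    public class NullAuthRoundTrip {
        public static void main(String[] args) throws Exception {
            // client side: build the authentication header
            String header = NullAuthProvider.createAuthString("user1", "users");

            // server side: parse it back into credentials
            NullAuthProvider auth = new NullAuthProvider();
            auth.initialize(false);
            UserCredentials creds = auth.getEffectiveCredentials(header, null);

            System.out.println(creds.getUserID() + ", superuser=" + creds.isSuperUser());
        }
    }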
diff --git a/servers/src/org/xtreemfs/common/auth/SimpleX509AuthProvider.java b/servers/src/org/xtreemfs/common/auth/SimpleX509AuthProvider.java
new file mode 100644
index 0000000000000000000000000000000000000000..cd274010d87e05aaa612da1f4c3a5ced9242005b
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/auth/SimpleX509AuthProvider.java
@@ -0,0 +1,113 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * AUTHORS: Bjoern Kolbeck (ZIB)
+ */
+package org.xtreemfs.common.auth;
+
+import java.security.cert.Certificate;
+import java.security.cert.X509Certificate;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.xtreemfs.common.logging.Logging;
+import org.xtreemfs.foundation.pinky.channels.ChannelIO;
+
+/**
+ * authentication provider for XOS certificates.
+ * @author bjko
+ */
+public class SimpleX509AuthProvider implements AuthenticationProvider {
+
+ private NullAuthProvider nullAuth;
+
+ public UserCredentials getEffectiveCredentials(String authHeader, ChannelIO channel) throws AuthenticationException {
+ //use cached info!
+ assert(nullAuth != null);
+ if (channel.getAttachment() != null) {
+ Logging.logMessage(Logging.LEVEL_DEBUG,this,"using attachment...");
+ final Object[] cache = (Object[])channel.getAttachment();
+ final Boolean serviceCert = (Boolean)cache[0];
+ if (serviceCert) {
+ Logging.logMessage(Logging.LEVEL_DEBUG,this,"service cert...");
+ return nullAuth.getEffectiveCredentials(authHeader, channel);
+ } else {
+ Logging.logMessage(Logging.LEVEL_DEBUG,this,"using cached creds: "+cache[1]);
+ return (UserCredentials)cache[1];
+ }
+ }
+ //parse cert if no cached info is present
+ try {
+ final Certificate[] certs = channel.getCerts();
+ if (certs.length > 0) {
+ final X509Certificate cert = ((X509Certificate) certs[0]);
+ String fullDN = cert.getSubjectX500Principal().getName();
+ String commonName = getNameElement(cert.getSubjectX500Principal().getName(),"CN");
+
+ if (commonName.startsWith("host/") || commonName.startsWith("xtreemfs-service/")) {
+ Logging.logMessage(Logging.LEVEL_DEBUG, this, "X.509-host cert present");
+ channel.setAttachment(new Object[]{new Boolean(true)});
+ //use NullAuth in this case to parse JSON header
+ return nullAuth.getEffectiveCredentials(authHeader, null);
+ } else {
+
+ final String globalUID = fullDN;
+ final String globalGID = getNameElement(cert.getSubjectX500Principal().getName(),"OU");
+ List<String> gids = new ArrayList<String>(1);
+ gids.add(globalGID);
+
+ Logging.logMessage(Logging.LEVEL_DEBUG, this, "X.509-User cert present: " + globalUID + "," + globalGID);
+
+ boolean isSuperUser = gids.contains("xtreemfs-admin");
+ final UserCredentials creds = new UserCredentials(globalUID, gids, isSuperUser);
+ channel.setAttachment(new Object[]{new Boolean(false),creds});
+ return creds;
+ }
+ } else {
+ throw new AuthenticationException("no X.509-certificates present");
+ }
+ } catch (Exception ex) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this, ex);
+ throw new AuthenticationException("invalid credentials "+ex);
+ }
+
+ }
+
+ private String getNameElement(String principal, String element) {
+ String[] elems = principal.split(",");
+ for (String elem: elems) {
+ String[] kv = elem.split("=");
+ if (kv.length != 2)
+ continue;
+ if (kv[0].equals(element))
+ return kv[1];
+ }
+ return null;
+ }
+
+ public void initialize(boolean useSSL) throws RuntimeException {
+ if (!useSSL) {
+ throw new RuntimeException(this.getClass().getName() + " can only be used if use_ssl is enabled!");
+ }
+ nullAuth = new NullAuthProvider();
+ nullAuth.initialize(useSSL);
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/auth/UserCredentials.java b/servers/src/org/xtreemfs/common/auth/UserCredentials.java
new file mode 100644
index 0000000000000000000000000000000000000000..98aec3fa01d15b72910eb412d1f9b36e9a57841e
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/auth/UserCredentials.java
@@ -0,0 +1,69 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * AUTHORS: Bjoern Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common.auth;
+
+import java.util.List;
+
+/**
+ * User credentials.
+ * @author bjko
+ */
+public class UserCredentials {
+ protected String userID;
+ protected List<String> groupIDs;
+ protected boolean superUser;
+
+ public UserCredentials(String userID, List<String> groupIDs, boolean superUser) {
+ this.userID = userID;
+ this.groupIDs = groupIDs;
+ this.superUser = superUser;
+ }
+
+ public String getUserID() {
+ return userID;
+ }
+
+ public void setUserID(String userID) {
+ this.userID = userID;
+ }
+
+ public List<String> getGroupIDs() {
+ return groupIDs;
+ }
+
+ public void setGroupIDs(List<String> groupIDs) {
+ this.groupIDs = groupIDs;
+ }
+
+ public boolean isSuperUser() {
+ return superUser;
+ }
+
+ public void setSuperUser(boolean superUser) {
+ this.superUser = superUser;
+ }
+
+
+}
diff --git a/servers/src/org/xtreemfs/common/buffer/ASCIIString.java b/servers/src/org/xtreemfs/common/buffer/ASCIIString.java
new file mode 100644
index 0000000000000000000000000000000000000000..22a39f951f2faebb63edfa23a45330d23fd58387
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/buffer/ASCIIString.java
@@ -0,0 +1,115 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.buffer;
+
+import java.io.Serializable;
+
+/**
+ *
+ * @author bjko
+ */
+public final class ASCIIString implements Serializable {
+
+ private byte[] data;
+
+ private int hash;
+
+ protected ASCIIString() {
+
+ }
+
+ /**
+ * Creates a new instance of ASCIIString
+ */
+ public ASCIIString(String str) {
+ this.data = str.getBytes();
+ }
+
+ /**
+ * Creates a new instance of ASCIIString
+ */
+ protected ASCIIString(byte[] data) {
+ this.data = data;
+ }
+
+ public String toString() {
+ return new String(data);
+ }
+
+ public char charAt(int index) {
+
+ return (char)data[index];
+ }
+
+ private byte uncheckedGetByte(int index) {
+ return data[index];
+ }
+
+ public boolean equals(Object o) {
+ if (o == null) return false;
+ try {
+ ASCIIString other = (ASCIIString)o;
+
+ // strings of different length cannot be equal
+ if (this.data.length != other.data.length)
+ return false;
+
+ for (int i = 0; i < data.length; i++) {
+ if (this.uncheckedGetByte(i) != other.uncheckedGetByte(i))
+ return false;
+ }
+ return true;
+ } catch (ClassCastException ex) {
+ return false;
+ }
+ }
+
+ public void marshall(ReusableBuffer target) {
+ target.putInt(data.length);
+ target.put(data);
+
+ }
+
+ public static ASCIIString unmarshall(ReusableBuffer target) {
+
+ int length = target.getInt();
+ if (length < 0)
+ return null;
+ byte[] tmp = new byte[length];
+
+ target.get(tmp);
+
+ return new ASCIIString(tmp);
+ }
+
+ public int hashCode() {
+ int h = hash;
+ if (h == 0) {
+
+ for (int i = 0; i < data.length; i++) {
+ h = 31*h + data[i];
+ }
+ hash = h;
+ }
+ return h;
+ }
+
+}
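A marshalling round trip for ASCIIString using a pooled buffer; BufferPool and ReusableBuffer are the classes added further down in this changeset, and the 64-byte size is an arbitrary choice.

    import org.xtreemfs.common.buffer.ASCIIString;
    import org.xtreemfs.common.buffer.BufferPool;
    import org.xtreemfs.common.buffer.ReusableBuffer;

    public class ASCIIStringExample {
        public static void main(String[] args) {
            ReusableBuffer buf = BufferPool.allocate(64);
            try {
                new ASCIIString("volume-17").marshall(buf); // writes length + bytes
                buf.flip();
                ASCIIString copy = ASCIIString.unmarshall(buf);
                System.out.println(copy + " hash=" + copy.hashCode());
            } finally {
                BufferPool.free(buf); // always return pooled buffers
            }
        }
    }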
diff --git a/servers/src/org/xtreemfs/common/buffer/BufferConversionUtils.java b/servers/src/org/xtreemfs/common/buffer/BufferConversionUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..e2f48d7ee215dad0522647c6faf5be8e667770e3
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/buffer/BufferConversionUtils.java
@@ -0,0 +1,60 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.buffer;
+
+import java.nio.ByteBuffer;
+
+/** This class contains some convenience methods for diverse purposes.
+ *
+ * @author Jesus Malo (jmalo)
+ */
+public class BufferConversionUtils {
+
+ /**
+ * Creates a new instance of BufferConversionUtils
+ */
+ public BufferConversionUtils() {
+ }
+
+ /** Gets the array of bytes of a ByteBuffer, copying them if the buffer is not backed by an array.
+ * @param source The buffer containing the required array of bytes
+ * @return The array of bytes contained in the given ByteBuffer
+ */
+ public static byte [] arrayOf(ByteBuffer source) {
+ byte [] array;
+
+ if (source.hasArray()) {
+ array = source.array();
+ } else {
+ array = new byte[source.capacity()];
+ source.position(0);
+ source.get(array);
+ }
+
+ return array;
+ }
+
+
+}
diff --git a/servers/src/org/xtreemfs/common/buffer/BufferPool.java b/servers/src/org/xtreemfs/common/buffer/BufferPool.java
new file mode 100644
index 0000000000000000000000000000000000000000..28704391ce05b7d79f2c3c21f5de4c3a17b7be13
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/buffer/BufferPool.java
@@ -0,0 +1,264 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.buffer;
+
+import java.nio.ByteBuffer;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * A concurrent pool for buffer recycling.
+ * @author bjko
+ */
+public final class BufferPool {
+
+ /** size of buffers for each class.
+ */
+ public static final int[] BUFF_SIZES = { 8192, 65536, 524288, 2097152 };
+
+ /** max pool size for each class
+ */
+ public static final int[] MAX_POOL_SIZES = { 2000, 6, 10, 5 };
+
+ /** queues to store buffers in
+ */
+ private final ConcurrentLinkedQueue<ByteBuffer>[] pools;
+
+ /** pool sizes to avoid counting elements on each access
+ */
+ private final AtomicInteger[] poolSizes;
+
+ /** stats for num requests and creates of buffers per class
+ */
+ private long[] requests, creates, deletes;
+
+ /** singleton pattern.
+ */
+ private static final BufferPool instance = new BufferPool();
+
+ /**
+ * if true all allocate/free operations record the stack trace.
+ * Useful to find memory leaks but slow.
+ */
+ protected static final boolean recordStackTraces = false;
+
+ /**
+ * Creates a new instance of BufferPool
+ */
+ private BufferPool() {
+ pools = new ConcurrentLinkedQueue[BUFF_SIZES.length];
+ requests = new long[BUFF_SIZES.length+1];
+ creates = new long[BUFF_SIZES.length];
+ deletes = new long[BUFF_SIZES.length+1];
+ poolSizes = new AtomicInteger[BUFF_SIZES.length];
+ for (int i = 0; i < BUFF_SIZES.length; i++) {
+ pools[i] = new ConcurrentLinkedQueue<ByteBuffer>();
+ poolSizes[i] = new AtomicInteger(0);
+ }
+ }
+
+ /** Get a new buffer. The Buffer is taken from the pool or created if none
+ * is available or the size exceeds the largest class.
+ * @param size the buffer's size in bytes
+ * @return a buffer of requested size
+ * @throws OutOfMemoryError if a buffer cannot be allocated
+ */
+ public static ReusableBuffer allocate(int size) {
+ ReusableBuffer tmp = instance.getNewBuffer(size);
+
+ if (recordStackTraces) {
+ try {
+ throw new Exception("allocate stack trace");
+ } catch (Exception e) {
+ tmp.allocStack = "\n";
+ for (StackTraceElement elem : e.getStackTrace())
+ tmp.allocStack += elem.toString()+"\n";
+ }
+ }
+ return tmp;
+ }
+
+ /** Returns a buffer to the pool, if the buffer is reusable. Other
+ * buffers are ignored.
+ * @param buf the buffer to return
+ */
+ public static void free(ReusableBuffer buf) {
+ if (buf != null) {
+ instance.returnBuffer(buf);
+ }
+ }
+
+ /** Returns a buffer which has at least size bytes.
+ * @attention The returned buffer can be larger than requested!
+ */
+ private ReusableBuffer getNewBuffer(int size) {
+ try {
+ ByteBuffer buf = null;
+
+ if (size <= BUFF_SIZES[0]) {
+ buf = pools[0].poll();
+ if (buf == null) {
+ buf = ByteBuffer.allocateDirect(BUFF_SIZES[0]);
+ creates[0]++;
+ } else {
+ poolSizes[0].decrementAndGet();
+ }
+ requests[0]++;
+ return new ReusableBuffer(buf,size);
+ } else if (size <= BUFF_SIZES[1]) {
+ buf = pools[1].poll();
+ if (buf == null) {
+ buf = ByteBuffer.allocateDirect(BUFF_SIZES[1]);
+ creates[1]++;
+ } else {
+ poolSizes[1].decrementAndGet();
+ }
+ requests[1]++;
+ return new ReusableBuffer(buf,size);
+ } else if (size <= BUFF_SIZES[2]) {
+ buf = pools[2].poll();
+ if (buf == null) {
+ buf = ByteBuffer.allocateDirect(BUFF_SIZES[2]);
+ creates[2]++;
+ } else {
+ poolSizes[2].decrementAndGet();
+ }
+ requests[2]++;
+ return new ReusableBuffer(buf,size);
+ } else if (size <= BUFF_SIZES[3]) {
+ buf = pools[3].poll();
+ if (buf == null) {
+ buf = ByteBuffer.allocateDirect(BUFF_SIZES[3]);
+ creates[3]++;
+ } else {
+ poolSizes[3].decrementAndGet();
+ }
+ requests[3]++;
+ return new ReusableBuffer(buf,size);
+ } else {
+ requests[4]++;
+ buf = ByteBuffer.allocateDirect(size);
+ return new ReusableBuffer(buf,size);
+ }
+ } catch (OutOfMemoryError ex) {
+ System.out.println(this.getStatus());
+ throw ex;
+ }
+ }
+
+ /** return a buffer to the pool
+ */
+ private void returnBuffer(ReusableBuffer buffer) {
+ if (!buffer.isReusable())
+ return;
+
+ if (buffer.viewParent != null) {
+ // view buffer
+ if (recordStackTraces) {
+ try {
+ throw new Exception("free stack trace");
+ } catch (Exception e) {
+ buffer.freeStack = "\n";
+ for (StackTraceElement elem : e.getStackTrace())
+ buffer.freeStack += elem.toString()+"\n";
+ }
+ }
+ assert(!buffer.returned) : "buffer was already released: "+buffer.freeStack;
+ buffer.returned = true;
+ returnBuffer(buffer.viewParent);
+
+ } else {
+
+ if (buffer.refCount.getAndDecrement() > 1) {
+ return;
+ }
+
+ assert(!buffer.returned) : "buffer was already released: "+buffer.freeStack;
+ buffer.returned = true;
+
+
+ if (recordStackTraces) {
+ try {
+ throw new Exception("free stack trace");
+ } catch (Exception e) {
+ buffer.freeStack = "\n";
+ for (StackTraceElement elem : e.getStackTrace())
+ buffer.freeStack += elem.toString()+"\n";
+ }
+ }
+
+ ByteBuffer buf = buffer.getParent();
+
+ buf.clear();
+ if (buf.capacity() == BUFF_SIZES[0]) {
+ if (poolSizes[0].get() < MAX_POOL_SIZES[0]) {
+ poolSizes[0].incrementAndGet();
+ pools[0].add(buf);
+ } else {
+ deletes[0]++;
+ }
+ } else if (buf.capacity() == BUFF_SIZES[1]) {
+ if (poolSizes[1].get() < MAX_POOL_SIZES[1]) {
+ poolSizes[1].incrementAndGet();
+ pools[1].add(buf);
+ } else {
+ deletes[1]++;
+ }
+ } else if (buf.capacity() == BUFF_SIZES[2]) {
+ if (poolSizes[2].get() < MAX_POOL_SIZES[2]) {
+ poolSizes[2].incrementAndGet();
+ pools[2].add(buf);
+ } else {
+ deletes[2]++;
+ }
+ } else if (buf.capacity() == BUFF_SIZES[3]) {
+ if (poolSizes[3].get() < MAX_POOL_SIZES[3]) {
+ poolSizes[3].incrementAndGet();
+ pools[3].add(buf);
+ } else {
+ deletes[3]++;
+ }
+ } else {
+ deletes[4]++;
+ }
+ }
+ }
+
+ /** Returns a textual representation of the pool status.
+ * @return a textual representation of the pool status.
+ */
+ public static String getStatus() {
+
+ String str = "";
+ for (int i = 0; i < 4; i++) {
+ str += String.format("%8d: poolSize = %5d numRequests = %8d creates = %8d deletes = %8d\n",
+ instance.BUFF_SIZES[i], instance.poolSizes[i].get(),
+ instance.requests[i], instance.creates[i], instance.deletes[i]);
+ }
+ str += String.format("unpooled (> %8d) numRequests = creates = %8d deletes = %8d",instance.BUFF_SIZES[3],instance.requests[4],instance.deletes[4]);
+ return str;
+ }
+
+}
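The intended allocate/free pattern for the pool above; freeing in a finally block matters because an un-freed buffer is only reported as an error when ReusableBuffer.finalize runs, as the next file shows.

    import org.xtreemfs.common.buffer.BufferPool;
    import org.xtreemfs.common.buffer.ReusableBuffer;

    public class BufferPoolExample {
        public static void main(String[] args) {
            // 1024 <= 8192, so a pooled 8k buffer is handed out, limited to 1024 bytes
            ReusableBuffer buf = BufferPool.allocate(1024);
            try {
                buf.putInt(42);
                buf.flip();
                System.out.println("read back: " + buf.getInt());
            } finally {
                BufferPool.free(buf); // returns the 8k parent buffer to the pool
            }
            System.out.println(BufferPool.getStatus());
        }
    }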
diff --git a/servers/src/org/xtreemfs/common/buffer/ReusableBuffer.java b/servers/src/org/xtreemfs/common/buffer/ReusableBuffer.java
new file mode 100644
index 0000000000000000000000000000000000000000..6dfdfeac1a873e4e9ca70eace4a20706691de903
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/buffer/ReusableBuffer.java
@@ -0,0 +1,590 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.buffer;
+
+import java.nio.ByteBuffer;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.xtreemfs.common.logging.Logging;
+import org.xtreemfs.common.util.OutputUtils;
+import org.xtreemfs.foundation.pinky.HTTPUtils;
+
+/**
+ *
+ * @author bjko
+ */
+public final class ReusableBuffer {
+
+ /** A view buffer of parentBuffer with the requested size.
+ * For non-reusable buffers this is the buffer itself
+ */
+ private ByteBuffer buffer;
+
+ /** A parent buffer which is returned to the pool
+ */
+ private final ByteBuffer parentBuffer;
+
+ /** True if the buffer can be returned to the pool
+ */
+ private final boolean reusable;
+
+ /** set to true after a buffer was returned to the pool
+ */
+ protected volatile boolean returned;
+
+ /** size (as requested), might be smaller than parentBuffer size but
+ * is always equal to the (view) buffer size.
+ */
+ private int size;
+
+ protected ReusableBuffer viewParent;
+
+ protected String freeStack, allocStack;
+
+ /**
+ * reference count
+ */
+ AtomicInteger refCount;
+
+ /** Creates a new instance of ReusableBuffer.
+ * A view buffer of size is created.
+ * @param buffer the parent buffer
+ * @param size the requested size
+ */
+ protected ReusableBuffer(ByteBuffer buffer, int size) {
+ buffer.position(0);
+ buffer.limit(size);
+ this.buffer = buffer.slice();
+ this.parentBuffer = buffer;
+ this.size = size;
+ this.reusable = true;
+ this.refCount = new AtomicInteger(1);
+ returned = false;
+ viewParent = null;
+ }
+
+ /** A wrapper for a non-reusable buffer.
+ * The buffer is not used by the pool when returned.
+ */
+ public ReusableBuffer(ByteBuffer nonManaged) {
+ this.buffer = nonManaged;
+ this.size = buffer.limit();
+ this.reusable = false;
+ this.parentBuffer = null;
+ returned = false;
+ this.refCount = new AtomicInteger(1);
+ viewParent = null;
+ }
+
+ /**
+ * Creates a non-reusable buffer around a byte array.
+ * Uses the ByteBuffer.wrap method.
+ *
+ * @param data the byte array containing the data
+ * @return a new ReusableBuffer wrapping the given array
+ */
+ public static ReusableBuffer wrap(byte[] data) {
+ return new ReusableBuffer(ByteBuffer.wrap(data));
+ }
+
+ public static ReusableBuffer wrap(byte[] data, int offset, int length) {
+ assert(offset >= 0);
+ assert(length >= 0);
+ if (offset+length > data.length)
+ throw new IllegalArgumentException("offset+length > buffer size ("+offset+"+"+length+" > "+data.length);
+ ByteBuffer tmp = ByteBuffer.wrap(data);
+ tmp.position(offset);
+ tmp.limit(offset+length);
+ return new ReusableBuffer(tmp.slice());
+ }
+
+ /**
+ * Creates a new view buffer. This view buffer shares the same data (i.e.
+ * backing byte buffer) but has independent position, limit etc.
+ */
+ public ReusableBuffer createViewBuffer() {
+
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+
+ if (this.viewParent == null) {
+
+ if (parentBuffer == null) {
+ // wrapped buffers
+ ReusableBuffer view = new ReusableBuffer(this.buffer, this.size);
+ view.viewParent = this;
+
+ return view;
+
+ } else {
+ // regular buffer
+ ReusableBuffer view = new ReusableBuffer(this.parentBuffer, this.size);
+ view.viewParent = this;
+ this.refCount.incrementAndGet();
+
+ if (BufferPool.recordStackTraces) {
+ try {
+ throw new Exception("allocate stack trace");
+ } catch (Exception e) {
+ view.allocStack = "\n";
+ for (StackTraceElement elem : e.getStackTrace())
+ view.allocStack += elem.toString() + "\n";
+ }
+ }
+
+ return view;
+ }
+
+ } else {
+
+ if (parentBuffer == null) {
+ // wrapped buffers
+ ReusableBuffer view = new ReusableBuffer(this.buffer, this.size);
+ view.viewParent = this.viewParent;
+
+ return view;
+
+ } else {
+ // regular buffer: use the parent to create a view buffer
+ ReusableBuffer view = new ReusableBuffer(this.buffer, this.size);
+ view.viewParent = this.viewParent;
+ this.viewParent.refCount.incrementAndGet();
+
+ if (BufferPool.recordStackTraces) {
+ try {
+ throw new Exception("allocate stack trace");
+ } catch (Exception e) {
+ view.allocStack = "\n";
+ for (StackTraceElement elem : e.getStackTrace())
+ view.allocStack += elem.toString() + "\n";
+ }
+ }
+
+ return view;
+ }
+ }
+ }
+
+ /** @see java.nio.Buffer#capacity
+ */
+ public int capacity() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return this.size;
+ }
+
+ /** @see java.nio.ByteBuffer#hasArray
+ */
+ public boolean hasArray() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return buffer.hasArray();
+ }
+
+ /** Returns the byte array of the buffer, creating a copy if the buffer is not backed by an array
+ * @return a byte array with a copy of the data
+ */
+ public byte [] array() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ byte [] array;
+
+ if (this.hasArray()) {
+ array = buffer.array();
+ } else {
+ array = new byte[this.limit()];
+ final int oldPos = this.position();
+ this.position(0);
+ this.get(array);
+ this.position(oldPos);
+ }
+
+ return array;
+ }
+
+ /** @see java.nio.Buffer#flip
+ */
+ public void flip() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.flip();
+ }
+
+ /** @see java.nio.Buffer#compact
+ */
+ public void compact() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.compact();
+ }
+
+ /** @see java.nio.Buffer#limit(int)
+ */
+ public void limit(int l) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.limit(l);
+ }
+
+ /** @see java.nio.Buffer#limit()
+ */
+ public int limit() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return buffer.limit();
+ }
+
+ /** @see java.nio.Buffer#position(int)
+ */
+ public void position(int p) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.position(p);
+ }
+
+ /** @see java.nio.Buffer#position()
+ */
+ public int position() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return buffer.position();
+ }
+
+ /** @see java.nio.Buffer#hasRemaining
+ */
+ public boolean hasRemaining() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return buffer.hasRemaining();
+ }
+
+ /** Returns the view buffer encapsulated by this ReusableBuffer.
+ * @return the view buffer
+ */
+ public ByteBuffer getBuffer() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return this.buffer;
+ }
+
+ /** Returns true, if this buffer is re-usable and can be returned to the pool.
+ * @return true, if this buffer is re-usable
+ */
+ public boolean isReusable() {
+ //assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return this.reusable;
+ }
+
+ /** Returns the parent buffer.
+ * @return the parent buffer
+ */
+ protected ByteBuffer getParent() {
+ return this.parentBuffer;
+ }
+
+ /** @see java.nio.ByteBuffer#get()
+ */
+ public byte get() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return buffer.get();
+ }
+
+ public byte get(int index) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return buffer.get(index);
+ }
+
+
+ /** @see java.nio.ByteBuffer#get(byte[])
+ */
+ public ReusableBuffer get(byte[] dst) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.get(dst);
+ return this;
+ }
+
+ /** @see java.nio.ByteBuffer#get(byte[], int offset, int length)
+ */
+ public ReusableBuffer get(byte[] dst, int offset, int length) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.get(dst, offset, length);
+ return this;
+ }
+
+ /** @see java.nio.ByteBuffer#put(byte)
+ */
+ public ReusableBuffer put(byte b) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.put(b);
+ return this;
+ }
+
+ /** @see java.nio.ByteBuffer#put(byte[])
+ */
+ public ReusableBuffer put(byte[] src) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.put(src);
+ return this;
+ }
+
+ /** @see java.nio.ByteBuffer#put(ByteBuffer)
+ */
+ public ReusableBuffer put(ByteBuffer src) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.put(src);
+ return this;
+ }
+
+ /** Writes the content of src into this buffer.
+ * @param src the buffer to read from
+ * @return this ReusableBuffer after reading
+ * @see java.nio.ByteBuffer#put(ByteBuffer)
+ */
+ public ReusableBuffer put(ReusableBuffer src) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.put(src.buffer);
+ return this;
+ }
+
+ /** @see java.nio.ByteBuffer#getInt
+ */
+ public int getInt() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return buffer.getInt();
+ }
+
+ /** @see java.nio.ByteBuffer#putInt(int)
+ */
+ public ReusableBuffer putInt(int i) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.putInt(i);
+ return this;
+ }
+
+ public long getLong() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return buffer.getLong();
+ }
+
+ public ReusableBuffer putLong(long l) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.putLong(l);
+ return this;
+ }
+
+ public String getString() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ int length = buffer.getInt();
+ if (length > 0) {
+ byte[] bytes = new byte[length];
+ buffer.get(bytes);
+ return new String(bytes,HTTPUtils.ENC_UTF8);
+ } else if (length == 0) {
+ return "";
+ } else {
+ return null;
+ }
+ }
+
+ public ReusableBuffer putString(String str) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ if (str != null) {
+ byte[] bytes = str.getBytes(HTTPUtils.ENC_UTF8);
+ buffer.putInt(bytes.length);
+ buffer.put(bytes);
+ } else {
+ buffer.putInt(-1);
+ }
+ return this;
+ }
+
+ public ASCIIString getBufferBackedASCIIString() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return ASCIIString.unmarshall(this);
+ }
+
+ public ReusableBuffer putBufferBackedASCIIString(ASCIIString str) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ if (str != null) {
+ str.marshall(this);
+ } else {
+ buffer.putInt(-1);
+ }
+ return this;
+ }
+
+ public ReusableBuffer putShort(short s) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.putShort(s);
+ return this;
+ }
+
+ public short getShort() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return buffer.getShort();
+ }
+
+ /** @see java.nio.ByteBuffer#isDirect
+ */
+ public boolean isDirect() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return buffer.isDirect();
+ }
+
+
+ /** @see java.nio.Buffer#remaining
+ */
+ public int remaining() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return buffer.remaining();
+ }
+
+ /** @see java.nio.Buffer#clear
+ */
+ public void clear() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.clear();
+ }
+
+
+ public byte[] getData() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ byte[] array = new byte[this.position()];
+ this.position(0);
+ this.get(array);
+ return array;
+ }
+
+ public void shrink(int newSize) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ if (newSize > size) {
+ throw new IllegalArgumentException("new size must not be larger than old size");
+ }
+ this.size = newSize;
+ int oldPos = buffer.position();
+ if (oldPos > newSize)
+ oldPos = 0;
+
+ // save parent position and limit
+ int position = parentBuffer.position();
+ int limit = parentBuffer.limit();
+
+ parentBuffer.position(0);
+ parentBuffer.limit(newSize);
+ this.buffer = parentBuffer.slice();
+ buffer.position(oldPos);
+
+ // restore parent position and limit
+ parentBuffer.position(position);
+ parentBuffer.limit(limit);
+ }
+
+
+ public boolean enlarge(int newSize) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ if (newSize > this.parentBuffer.capacity()) {
+ return false;
+ } else {
+
+ this.size = newSize;
+ int oldPos = buffer.position();
+ if (oldPos > newSize)
+ oldPos = 0;
+
+ // save parent position and limit
+ int position = parentBuffer.position();
+ int limit = parentBuffer.limit();
+
+ parentBuffer.position(0);
+ parentBuffer.limit(newSize);
+ this.buffer = parentBuffer.slice();
+ buffer.position(oldPos);
+
+ // restore parent position and limit
+ parentBuffer.position(position);
+ parentBuffer.limit(limit);
+
+ return true;
+ }
+ }
+
+ public void range(int offset, int length) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+
+ //useless call!
+ if ((offset == 0) && (length == this.size))
+ return;
+
+ if (offset > size) {
+ throw new IllegalArgumentException("offset must be < size. offset="+offset+" siz="+size);
+ }
+ if (offset+length > size) {
+ throw new IllegalArgumentException("offset+length must be <= size. size="+size+" offset="+offset+" length="+length);
+ }
+
+ this.size = length;
+
+ // save parent position and limit
+ int position = parentBuffer.position();
+ int limit = parentBuffer.limit();
+
+ parentBuffer.position(offset);
+ parentBuffer.limit(offset+length);
+ this.buffer = parentBuffer.slice();
+ assert(this.buffer.capacity() == length);
+
+ // restore parent position and limit
+ parentBuffer.position(position);
+ parentBuffer.limit(limit);
+ }
+
+ public ReusableBuffer putBoolean(boolean bool) {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ buffer.put(bool ? (byte)1 : (byte)0);
+ return this;
+ }
+
+ public boolean getBoolean() {
+ assert(!returned) : "Buffer was already freed and cannot be used anymore"+this.freeStack;
+ return buffer.get() == 1;
+ }
+
+ public int getRefCount() {
+ if (this.viewParent == null) {
+ return this.refCount.get();
+ } else {
+ return this.viewParent.refCount.get();
+ }
+ }
+
+ protected void finalize() {
+ if (!returned && reusable) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this,
+ "buffer was finalized but not freed before! this=" + this);
+
+ String content = new String(this.array());
+
+ Logging.logMessage(Logging.LEVEL_ERROR, this, "content: " + content);
+ Logging.logMessage(Logging.LEVEL_ERROR, this, "stacktrace: " + allocStack);
+
+ if (this.viewParent != null) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this, "view parent: " + this.viewParent);
+ Logging.logMessage(Logging.LEVEL_ERROR, this, "ref count: "
+ + this.viewParent.refCount.get());
+ } else {
+ Logging.logMessage(Logging.LEVEL_ERROR, this, "ref count: " + this.refCount.get());
+ }
+
+ }
+ }
+}
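The shrink(), enlarge() and range() methods above all work by re-slicing the parent buffer while saving and restoring its position and limit. A minimal stand-alone sketch of that slicing technique with a plain java.nio.ByteBuffer (class name, offsets and sizes are made up for illustration):

    import java.nio.ByteBuffer;

    public class SliceDemo {
        public static void main(String[] args) {
            ByteBuffer parent = ByteBuffer.allocate(64);

            // save the parent's state, just like shrink()/range() do above
            int savedPosition = parent.position();
            int savedLimit = parent.limit();

            // select bytes [8, 24) of the parent as an independent view
            parent.position(8);
            parent.limit(8 + 16);
            ByteBuffer view = parent.slice(); // capacity() == 16, shares the backing array

            // restore the parent (limit first, so the saved position is always valid)
            parent.limit(savedLimit);
            parent.position(savedPosition);

            view.putShort((short) 42); // writes through to parent bytes 8 and 9
            System.out.println("view capacity: " + view.capacity());
        }
    }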
diff --git a/servers/src/org/xtreemfs/common/checksums/ChecksumAlgorithm.java b/servers/src/org/xtreemfs/common/checksums/ChecksumAlgorithm.java
new file mode 100644
index 0000000000000000000000000000000000000000..3488769b531f16a61e2eea84670c26aeda6f6d3f
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/checksums/ChecksumAlgorithm.java
@@ -0,0 +1,71 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB)
+ */
+package org.xtreemfs.common.checksums;
+
+import java.nio.ByteBuffer;
+
+/**
+ * An interface which must be implemented by checksum algorithms for XtreemFS.
+ *
+ * 19.08.2008
+ *
+ * @author clorenz
+ */
+public interface ChecksumAlgorithm extends Cloneable {
+ /**
+ * Returns a string that identifies the algorithm, independent of
+ * implementation details.
+ *
+ * @return name of algorithm
+ */
+ public String getName();
+
+ /**
+ * Returns the checksum value (as a hex string) and resets the algorithm.
+ *
+ * @return checksum
+ */
+ public String getValue();
+
+ /**
+ * Resets the checksum to its initial value, discarding any data passed in
+ * via update() so far.
+ *
+ */
+ public void reset();
+
+ /**
+ * Updates checksum with specified data.
+ *
+ * @param data
+ */
+ public void update(ByteBuffer data);
+
+ /**
+ * Returns a new instance of the checksum algorithm.
+ *
+ * @return a new instance
+ */
+ public ChecksumAlgorithm clone();
+}
diff --git a/servers/src/org/xtreemfs/common/checksums/ChecksumFactory.java b/servers/src/org/xtreemfs/common/checksums/ChecksumFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..c940a977c5ed2a00260b44ada9a6e4dba9ca577c
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/checksums/ChecksumFactory.java
@@ -0,0 +1,165 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB)
+ */
+package org.xtreemfs.common.checksums;
+
+import java.security.NoSuchAlgorithmException;
+import java.util.HashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+
+/**
+ * A factory for obtaining checksum algorithms from checksum providers.
+ * Implemented as a singleton.
+ *
+ * 19.08.2008
+ *
+ * @author clorenz
+ */
+public class ChecksumFactory {
+ /**
+ * maximum number of cached instances per algorithm
+ */
+ private static int MAX_CACHE_SIZE = 20;
+
+ private static ChecksumFactory self;
+
+ /**
+ * Contains one prototype instance of each available checksum algorithm.
+ */
+ private HashMap<String, ChecksumAlgorithm> algorithms;
+
+ /**
+ * Contains all known checksum providers.
+ */
+ private HashMap<String, ChecksumProvider> knownProvider;
+
+ /**
+ * Contains cached instances for all available checksum algorithms.
+ */
+ private HashMap<String, ConcurrentLinkedQueue<ChecksumAlgorithm>> pool;
+
+ /**
+ * creates a new ChecksumFactory
+ */
+ private ChecksumFactory() {
+ super();
+ this.algorithms = new HashMap<String, ChecksumAlgorithm>();
+ this.pool = new HashMap<String, ConcurrentLinkedQueue<ChecksumAlgorithm>>();
+ this.knownProvider = new HashMap<String, ChecksumProvider>();
+ }
+
+ /**
+ * Get the instance of ChecksumFactory.
+ *
+ * @return the instance
+ */
+ public static ChecksumFactory getInstance() {
+ if (self == null) {
+ self = new ChecksumFactory();
+ }
+ return self;
+ }
+
+ /**
+ * Get an instance of a specific checksum algorithm, if supported.
+ *
+ * @param name
+ * of the algorithm
+ * @return an algorithm instance (never null; a NoSuchAlgorithmException is thrown if the algorithm is not supported)
+ */
+ public ChecksumAlgorithm getAlgorithm(String name)
+ throws NoSuchAlgorithmException {
+ ConcurrentLinkedQueue<ChecksumAlgorithm> cache = pool.get(name);
+ if (cache == null)
+ throw new NoSuchAlgorithmException("algorithm " + name
+ + " not supported");
+
+ ChecksumAlgorithm algorithm = cache.poll();
+ if (algorithm == null) { // cache is empty
+ return algorithms.get(name).clone(); // create new instance
+ } else {
+ return algorithm; // return cached instance
+ }
+ }
+
+ /**
+ * Returns an instance of a checksum algorithm to the cache, so it can be reused.
+ *
+ * @param algorithm
+ * the instance to return
+ */
+ public void returnAlgorithm(ChecksumAlgorithm algorithm) {
+ ConcurrentLinkedQueue<ChecksumAlgorithm> cache = pool.get(algorithm
+ .getName());
+ if (cache.size() < MAX_CACHE_SIZE) {
+ algorithm.reset();
+ cache.add(algorithm);
+ }
+ }
+
+ /**
+ * Adds a new provider to the factory and registers all algorithms supported
+ * by that provider. NOTE: An existing algorithm is overridden if the new
+ * provider contains an algorithm with the same name (possibly a different
+ * implementation).
+ *
+ * @param provider
+ */
+ public void addProvider(ChecksumProvider provider) {
+ knownProvider.put(provider.getName(), provider);
+ for (ChecksumAlgorithm algorithm : provider.getSupportedAlgorithms()) {
+ addAlgorithm(algorithm);
+ }
+ }
+
+ /**
+ * Adds a new algorithm to the factory. NOTE: An existing algorithm with the
+ * same name will be overridden.
+ *
+ * @param algorithm
+ */
+ public void addAlgorithm(ChecksumAlgorithm algorithm) {
+ algorithms.put(algorithm.getName(), algorithm);
+ pool.put(algorithm.getName(),
+ new ConcurrentLinkedQueue<ChecksumAlgorithm>());
+ }
+
+ /**
+ * Removes a provider, but not the added algorithms.
+ *
+ * @param provider
+ */
+ public void removeProvider(ChecksumProvider provider) {
+ knownProvider.remove(provider.getName());
+ }
+
+ /**
+ * Removes an algorithm.
+ *
+ * @param algorithm
+ */
+ public void removeAlgorithm(String algorithm) {
+ algorithms.remove(algorithm);
+ pool.remove(algorithm);
+ }
+}
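A short usage sketch for the factory; it assumes the JavaChecksumProvider added later in this patch has been registered first, and the input bytes are placeholders:

    import java.nio.ByteBuffer;
    import java.security.NoSuchAlgorithmException;

    import org.xtreemfs.common.checksums.ChecksumAlgorithm;
    import org.xtreemfs.common.checksums.ChecksumFactory;
    import org.xtreemfs.common.checksums.provider.JavaChecksumProvider;

    public class ChecksumFactoryDemo {
        public static void main(String[] args) throws NoSuchAlgorithmException {
            ChecksumFactory factory = ChecksumFactory.getInstance();
            factory.addProvider(new JavaChecksumProvider());

            // borrow an instance from the pool (or a fresh clone if the pool is empty)
            ChecksumAlgorithm crc = factory.getAlgorithm("CRC32");
            crc.update(ByteBuffer.wrap("hello".getBytes()));
            String checksum = crc.getValue(); // hex string; the algorithm is reset afterwards

            // hand the instance back so it can be reused
            factory.returnAlgorithm(crc);
            System.out.println("CRC32 = " + checksum);
        }
    }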
diff --git a/servers/src/org/xtreemfs/common/checksums/ChecksumProvider.java b/servers/src/org/xtreemfs/common/checksums/ChecksumProvider.java
new file mode 100644
index 0000000000000000000000000000000000000000..0852e7adc58733312139bff2bbc61c8f4961a37b
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/checksums/ChecksumProvider.java
@@ -0,0 +1,72 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB)
+ */
+package org.xtreemfs.common.checksums;
+
+import java.util.Collection;
+import java.util.HashMap;
+
+/**
+ * An abstract class which must be extended by checksum providers for
+ * XtreemFS.
+ *
+ * 19.08.2008
+ *
+ * @author clorenz
+ */
+public abstract class ChecksumProvider {
+ /**
+ * contains the supported algorithms
+ */
+ protected HashMap<String, ChecksumAlgorithm> algorithms;
+
+ protected ChecksumProvider() {
+ super();
+ this.algorithms = new HashMap<String, ChecksumAlgorithm>();
+ }
+
+ /**
+ * Returns the name of the provider.
+ *
+ * @return name
+ */
+ public abstract String getName();
+
+ /**
+ * Returns all checksum algorithms supported by this provider.
+ *
+ * @return a collection with ChecksumAlgorithms
+ */
+ public Collection<ChecksumAlgorithm> getSupportedAlgorithms() {
+ return algorithms.values();
+ }
+
+ /**
+ * Adds an algorithm to the map of supported algorithms.
+ *
+ * @param newAlgorithm
+ */
+ protected void addAlgorithm(ChecksumAlgorithm newAlgorithm) {
+ this.algorithms.put(newAlgorithm.getName(), newAlgorithm);
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/checksums/StringChecksumAlgorithm.java b/servers/src/org/xtreemfs/common/checksums/StringChecksumAlgorithm.java
new file mode 100644
index 0000000000000000000000000000000000000000..2f33dd45ae52e4e137cd01e8f8fed94807f410ac
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/checksums/StringChecksumAlgorithm.java
@@ -0,0 +1,41 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB)
+ */
+package org.xtreemfs.common.checksums;
+
+/**
+ * An interface for checksum algorithms that compute their value directly
+ * from strings.
+ *
+ * 02.09.2008
+ *
+ * @author clorenz
+ */
+public interface StringChecksumAlgorithm extends ChecksumAlgorithm {
+ /**
+ * Updates checksum with specified data.
+ *
+ * @param data
+ */
+ public void digest(String data);
+}
diff --git a/servers/src/org/xtreemfs/common/checksums/algorithms/Adler32.java b/servers/src/org/xtreemfs/common/checksums/algorithms/Adler32.java
new file mode 100644
index 0000000000000000000000000000000000000000..61a893d298a2390c88f6c9cca84f054e9b8065f0
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/checksums/algorithms/Adler32.java
@@ -0,0 +1,47 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB)
+ */
+package org.xtreemfs.common.checksums.algorithms;
+
+/**
+ * The Adler32 algorithm. It uses the Java internal implementation.
+ *
+ * 19.08.2008
+ *
+ * @author clorenz
+ */
+public class Adler32 extends JavaChecksumAlgorithm<java.util.zip.Adler32> {
+ public Adler32() {
+ super(new java.util.zip.Adler32(), "Adler32");
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#clone()
+ */
+ @Override
+ public Adler32 clone() {
+ return new Adler32();
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/checksums/algorithms/CRC32.java b/servers/src/org/xtreemfs/common/checksums/algorithms/CRC32.java
new file mode 100644
index 0000000000000000000000000000000000000000..7da87a4e3dbd2d9bc81346221379f885fcfbda3a
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/checksums/algorithms/CRC32.java
@@ -0,0 +1,47 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB)
+ */
+package org.xtreemfs.common.checksums.algorithms;
+
+/**
+ * The CRC32 algorithm. It uses the Java internal implementation.
+ *
+ * 19.08.2008
+ *
+ * @author clorenz
+ */
+public class CRC32 extends JavaChecksumAlgorithm<java.util.zip.CRC32> {
+ public CRC32() {
+ super(new java.util.zip.CRC32(), "CRC32");
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#clone()
+ */
+ @Override
+ public CRC32 clone() {
+ return new CRC32();
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/checksums/algorithms/JavaChecksumAlgorithm.java b/servers/src/org/xtreemfs/common/checksums/algorithms/JavaChecksumAlgorithm.java
new file mode 100644
index 0000000000000000000000000000000000000000..ea487b41610180ae6a39158c8310788fa1e123f0
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/checksums/algorithms/JavaChecksumAlgorithm.java
@@ -0,0 +1,114 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB)
+ */
+package org.xtreemfs.common.checksums.algorithms;
+
+import java.nio.ByteBuffer;
+import java.util.zip.Checksum;
+
+import org.xtreemfs.common.checksums.ChecksumAlgorithm;
+
+/**
+ * An abstract wrapper for Java internal checksums.
+ *
+ * 19.08.2008
+ *
+ * @author clorenz
+ */
+abstract public class JavaChecksumAlgorithm<RealJavaAlgorithm extends Checksum>
+ implements ChecksumAlgorithm {
+ /**
+ * the object that actually implements the selected algorithm
+ */
+ protected RealJavaAlgorithm realAlgorithm;
+
+ protected String name;
+
+ public JavaChecksumAlgorithm(RealJavaAlgorithm realAlgorithm, String name) {
+ super();
+ this.realAlgorithm = realAlgorithm;
+ this.name = name;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#update(java.nio.ByteBuffer)
+ */
+ @Override
+ public void update(ByteBuffer data) {
+ byte[] array;
+
+ if (data.hasArray()) {
+ array = data.array();
+ } else {
+ array = new byte[data.capacity()];
+ final int oldPos = data.position();
+ data.position(0);
+ data.get(array);
+ data.position(oldPos);
+ }
+
+ realAlgorithm.update(array, 0, array.length);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#getName()
+ */
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#getValue()
+ */
+ @Override
+ public String getValue() {
+ String value = Long.toHexString(realAlgorithm.getValue());
+ realAlgorithm.reset();
+ return value;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#reset()
+ */
+ @Override
+ public void reset() {
+ realAlgorithm.reset();
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#clone()
+ */
+ @Override
+ public abstract JavaChecksumAlgorithm clone();
+}
diff --git a/servers/src/org/xtreemfs/common/checksums/algorithms/JavaHash.java b/servers/src/org/xtreemfs/common/checksums/algorithms/JavaHash.java
new file mode 100644
index 0000000000000000000000000000000000000000..01e5edf1791c676c2211050e47943564837032ba
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/checksums/algorithms/JavaHash.java
@@ -0,0 +1,119 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB)
+ */
+package org.xtreemfs.common.checksums.algorithms;
+
+import java.nio.ByteBuffer;
+
+import org.xtreemfs.common.checksums.StringChecksumAlgorithm;
+
+/**
+ * The Java hash algorithm used by String.hashCode(). It uses the Java
+ * internal implementation.
+ *
+ * 02.09.2008
+ *
+ * @author clorenz
+ */
+public class JavaHash implements StringChecksumAlgorithm {
+ private String hash = null;
+
+ private String name = "Java-Hash";
+
+ /**
+ * Updates checksum with specified data.
+ *
+ * @param data
+ */
+ public void digest(String data) {
+ this.hash = Integer.toHexString(data.hashCode());
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#update(java.nio.ByteBuffer)
+ */
+ @Override
+ public void update(ByteBuffer data) {
+ byte[] array;
+
+ if (data.hasArray()) {
+ array = data.array();
+ } else {
+ array = new byte[data.capacity()];
+ final int oldPos = data.position();
+ data.position(0);
+ data.get(array);
+ data.position(oldPos);
+ }
+
+ this.hash = Integer.toHexString(new String(array).hashCode());
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#getName()
+ */
+ @Override
+ public String getName() {
+ return this.name;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#getValue()
+ */
+ @Override
+ public String getValue() {
+ String value;
+ if (this.hash != null)
+ value = this.hash;
+ else
+ value = "";
+ reset();
+ return value;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#reset()
+ */
+ @Override
+ public void reset() {
+ hash = null;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#clone()
+ */
+ @Override
+ public JavaHash clone() {
+ return new JavaHash();
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/checksums/algorithms/JavaMessageDigestAlgorithm.java b/servers/src/org/xtreemfs/common/checksums/algorithms/JavaMessageDigestAlgorithm.java
new file mode 100644
index 0000000000000000000000000000000000000000..4e8f84aecd6e378a32eb64d533c051cea1046e6c
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/checksums/algorithms/JavaMessageDigestAlgorithm.java
@@ -0,0 +1,143 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB)
+ */
+package org.xtreemfs.common.checksums.algorithms;
+
+import java.nio.ByteBuffer;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+
+import org.xtreemfs.common.checksums.ChecksumAlgorithm;
+
+/**
+ * A wrapper for Java internal message digest algorithms.
+ *
+ * 01.09.2008
+ *
+ * @author clorenz
+ */
+public class JavaMessageDigestAlgorithm implements ChecksumAlgorithm {
+ /**
+ * the object that actually implements the selected algorithm
+ */
+ protected MessageDigest realAlgorithm;
+
+ protected String name;
+
+ /**
+ * used for converting the byte-array to a hexString
+ */
+ protected StringBuffer hexString;
+
+ public JavaMessageDigestAlgorithm(String realAlgorithm, String name)
+ throws NoSuchAlgorithmException {
+ super();
+ this.realAlgorithm = MessageDigest.getInstance(realAlgorithm);
+ this.name = name;
+ this.hexString = new StringBuffer();
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#update(java.nio.ByteBuffer)
+ */
+ @Override
+ public void update(ByteBuffer data) {
+ byte[] array;
+
+ if (data.hasArray()) {
+ array = data.array();
+ } else {
+ array = new byte[data.capacity()];
+ final int oldPos = data.position();
+ data.position(0);
+ data.get(array);
+ data.position(oldPos);
+ }
+
+ realAlgorithm.update(array, 0, array.length);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#getName()
+ */
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#getValue()
+ */
+ @Override
+ public String getValue() {
+ return toHexString(realAlgorithm.digest());
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#reset()
+ */
+ @Override
+ public void reset() {
+ realAlgorithm.reset();
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#clone()
+ */
+ @Override
+ public JavaMessageDigestAlgorithm clone() {
+ try {
+ return new JavaMessageDigestAlgorithm(this.realAlgorithm
+ .getAlgorithm(), this.name);
+ } catch (NoSuchAlgorithmException e) {
+ // cannot happen, because an instance of this algorithm already exists
+ return null;
+ }
+ }
+
+ /**
+ * Converts a hash to a hex string.
+ *
+ * @param hash the raw digest bytes
+ * @return the hex representation of the hash
+ */
+ protected String toHexString(byte[] hash) {
+ for (int i = 0; i < hash.length; i++) {
+ hexString.append(String.format("%02x", hash[i])); // pad to two digits so leading zeros are kept
+ }
+ String checksum = hexString.toString();
+ this.hexString.setLength(0);
+ return checksum;
+ }
+
+}
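The two-digit padding in toHexString() matters because Integer.toHexString() drops leading zeros, so digests containing small byte values would otherwise change length. A self-contained illustration (not part of the patch, values chosen arbitrarily):

    public class HexPaddingDemo {
        public static void main(String[] args) {
            byte[] hash = { 0x0A, (byte) 0xFF, 0x00 };

            StringBuilder unpadded = new StringBuilder();
            StringBuilder padded = new StringBuilder();
            for (byte b : hash) {
                unpadded.append(Integer.toHexString(0xFF & b)); // drops leading zeros
                padded.append(String.format("%02x", b));        // always two digits per byte
            }
            System.out.println(unpadded); // prints "aff0"   -> ambiguous, wrong length
            System.out.println(padded);   // prints "0aff00" -> one byte = two hex digits
        }
    }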
diff --git a/servers/src/org/xtreemfs/common/checksums/algorithms/SDBM.java b/servers/src/org/xtreemfs/common/checksums/algorithms/SDBM.java
new file mode 100644
index 0000000000000000000000000000000000000000..c2a34feb065ff723bb806cf9d4042ab3a85e9e87
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/checksums/algorithms/SDBM.java
@@ -0,0 +1,132 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB)
+ */
+package org.xtreemfs.common.checksums.algorithms;
+
+import java.nio.ByteBuffer;
+
+import org.xtreemfs.common.checksums.StringChecksumAlgorithm;
+
+/**
+ * The SDBM algorithm.
+ *
+ * 02.09.2008
+ *
+ * @author clorenz
+ */
+public class SDBM implements StringChecksumAlgorithm {
+ private String hash = null;
+
+ private String name = "SDBM";
+
+ /**
+ * Updates checksum with specified data.
+ *
+ * @param data
+ */
+ public void digest(String data) {
+ this.hash = sdbmHash(data);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#update(java.nio.ByteBuffer)
+ */
+ @Override
+ public void update(ByteBuffer data) {
+ byte[] array;
+
+ if (data.hasArray()) {
+ array = data.array();
+ } else {
+ array = new byte[data.capacity()];
+ final int oldPos = data.position();
+ data.position(0);
+ data.get(array);
+ data.position(oldPos);
+ }
+
+ this.hash = sdbmHash(new String(array));
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#getName()
+ */
+ @Override
+ public String getName() {
+ return this.name;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#getValue()
+ */
+ @Override
+ public String getValue() {
+ String value;
+ if (this.hash != null)
+ value = this.hash;
+ else
+ value = "";
+ reset();
+ return value;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksums.ChecksumAlgorithm#reset()
+ */
+ @Override
+ public void reset() {
+ hash = null;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksum.ChecksumAlgorithm#clone()
+ */
+ @Override
+ public SDBM clone() {
+ return new SDBM();
+ }
+
+ /**
+ * Computes the SDBM hash of a string.
+ *
+ * @param str the input string
+ * @return the hash as a hex string
+ */
+ protected static String sdbmHash(String str) {
+ long hash = 0;
+ for (int c : str.toCharArray()) {
+ hash = c + (hash << 6) + (hash << 16) - hash;
+ }
+ return Long.toHexString(hash);
+ }
+}
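A brief usage sketch for the string-based algorithms (SDBM here; JavaHash works the same way). The input string is an arbitrary placeholder:

    import org.xtreemfs.common.checksums.StringChecksumAlgorithm;
    import org.xtreemfs.common.checksums.algorithms.SDBM;

    public class SdbmDemo {
        public static void main(String[] args) {
            StringChecksumAlgorithm algorithm = new SDBM();
            algorithm.digest("volume/file/object-17"); // hash the string directly
            String value = algorithm.getValue();       // hex string; resets the algorithm
            System.out.println("SDBM = " + value);
        }
    }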
diff --git a/servers/src/org/xtreemfs/common/checksums/provider/JavaChecksumProvider.java b/servers/src/org/xtreemfs/common/checksums/provider/JavaChecksumProvider.java
new file mode 100644
index 0000000000000000000000000000000000000000..12c2fa20ce47b9690558dae246962a058dc73b21
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/checksums/provider/JavaChecksumProvider.java
@@ -0,0 +1,71 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB)
+ */
+package org.xtreemfs.common.checksums.provider;
+
+import java.security.NoSuchAlgorithmException;
+
+import org.xtreemfs.common.checksums.ChecksumProvider;
+import org.xtreemfs.common.logging.Logging;
+
+/**
+ * A provider for Java internal checksums. Offers the following algorithms:
+ * Adler32, CRC32, MD5, SHA-1 and Java-Hash.
+ *
+ * 19.08.2008
+ *
+ * @author clorenz
+ */
+public class JavaChecksumProvider extends ChecksumProvider {
+ private static String NAME = "Java Checksum Provider";
+
+ /**
+ * creates a new JavaChecksumProvider
+ */
+ public JavaChecksumProvider() {
+ super();
+
+ addAlgorithm(new org.xtreemfs.common.checksums.algorithms.Adler32());
+ addAlgorithm(new org.xtreemfs.common.checksums.algorithms.CRC32());
+ try {
+ addAlgorithm(new org.xtreemfs.common.checksums.algorithms.JavaMessageDigestAlgorithm(
+ "MD5", "MD5"));
+ addAlgorithm(new org.xtreemfs.common.checksums.algorithms.JavaMessageDigestAlgorithm(
+ "SHA1", "SHA-1"));
+ } catch (NoSuchAlgorithmException e) {
+ Logging.logMessage(Logging.LEVEL_WARN, this, e.getMessage()
+ + " in your java-installation");
+ }
+ addAlgorithm(new org.xtreemfs.common.checksums.algorithms.JavaHash());
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.xtreemfs.common.checksums.ChecksumProvider#getName()
+ */
+ @Override
+ public String getName() {
+ return NAME;
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/clients/HttpErrorException.java b/servers/src/org/xtreemfs/common/clients/HttpErrorException.java
new file mode 100644
index 0000000000000000000000000000000000000000..350b44d8f0f068ea28140cddaf2f77e270058771
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/HttpErrorException.java
@@ -0,0 +1,112 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.clients;
+
+import java.io.IOException;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.json.JSONString;
+import org.xtreemfs.foundation.pinky.HTTPUtils;
+
+/**
+ * Exception thrown by RPCClient for invalid
+ * server responses (i.e. other than status code 200).
+ * @author bjko
+ */
+public class HttpErrorException extends IOException {
+
+ /**
+ * The status code returned by the server.
+ */
+ protected final int statusCode;
+
+ /**
+ * The response body sent by the server or null if none was sent.
+ */
+ protected final byte[] responseBody;
+
+ /**
+ * Creates a new instance of HttpErrorException
+ * @param statusCode the status code sent by the server
+ * @param responseBody the body sent by the server
+ */
+ public HttpErrorException(int statusCode, byte[] responseBody) {
+ super("status code is "+statusCode + ", error=" + new String(responseBody));
+ this.statusCode = statusCode;
+ this.responseBody = responseBody;
+ }
+
+ /**
+ * Creates a new instance of HttpErrorException
+ * @param statusCode the status code sent by the server
+ */
+ public HttpErrorException(int statusCode) {
+ super("status code is "+statusCode);
+ this.statusCode = statusCode;
+ this.responseBody = null;
+ }
+
+ /**
+ * Returns the status code sent by the server.
+ * @return the status code sent by the server
+ */
+ public int getStatusCode() {
+ return this.statusCode;
+ }
+
+ /**
+ * Returns the response body sent by the server.
+ * @return the response body sent by the server
+ */
+ public byte[] getResponseBody() {
+ return responseBody;
+ }
+
+ /**
+ * Returns the response body's content parsed by the JSON parser.
+ * @throws org.xtreemfs.foundation.json.JSONException if the body does not contain valid JSON
+ * @return the object read from the body
+ */
+ public Object parseJSONResponse() throws JSONException {
+ String body = new String(responseBody, HTTPUtils.ENC_UTF8);
+ return JSONParser.parseJSON(new JSONString(body));
+ }
+
+ /**
+ * A string representation of the exception.
+ * @return a string representation of the exception.
+ */
+ public String toString() {
+ if (responseBody != null)
+ return this.getClass().getSimpleName()+": status code is "+statusCode+", error=" + new String(responseBody);
+ else
+ return this.getClass().getSimpleName()+": status code is "+statusCode;
+ }
+
+ public boolean authenticationRequest() {
+ return this.statusCode == HTTPUtils.SC_UNAUTHORIZED;
+ }
+
+}
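How a caller is expected to handle this exception, sketched against the RPCClient and RPCResponse classes that follow in this patch; host, port, method name and the authentication string are placeholders:

    import java.net.InetSocketAddress;

    import org.xtreemfs.common.clients.HttpErrorException;
    import org.xtreemfs.common.clients.RPCClient;
    import org.xtreemfs.common.clients.RPCResponse;
    import org.xtreemfs.foundation.pinky.HTTPUtils;

    public class ErrorHandlingDemo {
        public static void main(String[] args) throws Exception {
            RPCClient client = new RPCClient();
            RPCResponse r = client.sendRPC(new InetSocketAddress("localhost", 32636),
                    "someMethod", null, "nullauth", null);
            try {
                r.waitForResponse();
            } catch (HttpErrorException ex) {
                if (ex.getStatusCode() == HTTPUtils.SC_UNAUTHORIZED) {
                    System.err.println("server demands authentication");
                } else if (ex.getResponseBody() != null) {
                    // parse the body if the server sent JSON error details
                    System.err.println(ex.parseJSONResponse());
                }
            } finally {
                client.shutdown();
            }
        }
    }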
diff --git a/servers/src/org/xtreemfs/common/clients/RPCClient.java b/servers/src/org/xtreemfs/common/clients/RPCClient.java
new file mode 100644
index 0000000000000000000000000000000000000000..2ca451c06e1620a9cf10bc9ef1d78a528f82f561
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/RPCClient.java
@@ -0,0 +1,306 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.clients;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.MessageDigest;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import org.xtreemfs.common.buffer.ReusableBuffer;
+import org.xtreemfs.common.logging.Logging;
+import org.xtreemfs.common.util.OutputUtils;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.pinky.HTTPHeaders;
+import org.xtreemfs.foundation.pinky.HTTPUtils;
+import org.xtreemfs.foundation.pinky.SSLOptions;
+import org.xtreemfs.foundation.speedy.MultiSpeedy;
+import org.xtreemfs.foundation.speedy.SpeedyRequest;
+
+/**
+ * Generic RPC over HTTP client. Can send JSON and binary requests via HTTP.
+ * Can also be used as a very simple HTTP client.
+ * @author bjko
+ */
+public class RPCClient {
+
+ /**
+ * The speedy used by the client.
+ */
+ private final MultiSpeedy speedy;
+
+ /**
+ * timeout to use for RPCs
+ */
+ private int timeout;
+
+ /**
+ * default timeout used
+ */
+ public static final int DEFAULT_TIMEOUT = 10000;
+
+ /**
+ * Creates a new client with a private speedy instance.
+ * @throws java.io.IOException if speedy cannot be started.
+ */
+ public RPCClient() throws IOException {
+ this(null);
+ }
+
+ /**
+ * Creates a new instance of the RPCClient
+ * @param sharedSpeedy a speedy shared among several clients. If null, a new speedy instance is created.
+ * @throws java.io.IOException if speedy cannot be started
+ */
+ public RPCClient(final MultiSpeedy sharedSpeedy)
+ throws IOException {
+
+ this.timeout = DEFAULT_TIMEOUT;
+
+ if (sharedSpeedy != null) {
+ speedy = sharedSpeedy;
+ } else {
+ speedy = new MultiSpeedy();
+ speedy.start();
+ }
+
+ Thread.yield();
+ }
+
+ /**
+ * Creates a new instance of the RPCClient
+ * @param sharedSpeedy a speedy shared among several clients. If null, a new speedy instance is created.
+ * @throws java.io.IOException if speedy cannot be started
+ */
+ public RPCClient(MultiSpeedy sharedSpeedy, int timeout)
+ throws IOException {
+ this(sharedSpeedy);
+ this.timeout = timeout;
+ }
+
+ /**
+ * Creates a new instance of the RPCClient
+ * A new speedy instance with SSL support will be created.
+ * @param sslOptions options for ssl connection, null for no SSL
+ * @throws java.io.IOException if speedy cannot be started
+ */
+ public RPCClient(int timeout, SSLOptions sslOptions)
+ throws IOException {
+ speedy = new MultiSpeedy(sslOptions);
+ speedy.start();
+
+ this.timeout = timeout;
+ Thread.yield();
+ }
+
+ /**
+ * Shuts down the speedy used by this client.
+ * @attention Also shuts down the speedy if it is shared!
+ */
+ public void shutdown() {
+ speedy.shutdown();
+ }
+
+ public void waitForShutdown() {
+ try {
+ speedy.waitForShutdown();
+ } catch (Exception e) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this, e);
+ }
+ }
+
+ /**
+ * Sends an xtreemfs JSON RPC request.
+ * @param authString authentication string to send to the remote server
+ * @param addHdrs additional headers to include in the request.
+ * Cannot override headers set by the HTTP client automatically.
+ * @param server the server to send the request to
+ * @param method the RPC method to call (which is the URI sent as part of the HTTP request).
+ * @param data The request's parameters. If null, an empty body is sent. If data is a ReusableBuffer
+ * the data is sent as a binary body. Everything else is sent as a JSON encoded
+ * object.
+ * @return an RPCResponse for the asynchronous request
+ * @throws org.xtreemfs.foundation.json.JSONException if data cannot be translated into a JSON object.
+ * @throws java.io.IOException if the request cannot be sent.
+ */
+ public RPCResponse sendRPC(InetSocketAddress server,
+ String method, Object data, String authString,
+ HTTPHeaders addHdrs) throws IOException,JSONException {
+
+ if (data == null) {
+ return send(server, method, null, addHdrs, authString, HTTPUtils.DATA_TYPE.JSON,HTTPUtils.POST_TOKEN);
+ } else {
+ ReusableBuffer body = null;
+ HTTPUtils.DATA_TYPE type = HTTPUtils.DATA_TYPE.JSON;
+ if (data instanceof ReusableBuffer) {
+ Logging.logMessage(Logging.LEVEL_DEBUG,this,"request body contains binary data");
+ body = (ReusableBuffer)data;
+ type = HTTPUtils.DATA_TYPE.BINARY;
+ } else {
+ Logging.logMessage(Logging.LEVEL_DEBUG,this,"request body contains JSON data");
+ String json = JSONParser.writeJSON(data);
+ body = ReusableBuffer.wrap(json.getBytes(HTTPUtils.ENC_UTF8));
+ }
+ return send(server, method, body, addHdrs,authString, type,HTTPUtils.POST_TOKEN);
+ }
+ }
+
+ /**
+ * Uses the underlying speedy to check if the server is blocked because it is not
+ * responding.
+ * @param server the server to check
+ * @return true, if server is not blocked, false otherwise
+ * @see MultiSpeedy
+ */
+ public boolean serverIsAvailable(InetSocketAddress server) {
+ return speedy.serverIsAvailable(server);
+ }
+
+
+ /**
+ * internal method for sending requests.
+ */
+ protected RPCResponse send(InetSocketAddress server, String uri,
+ ReusableBuffer body, HTTPHeaders headers, String authString,
+ HTTPUtils.DATA_TYPE type, String httpMethod)
+ throws IOException {
+
+ assert(uri != null);
+ //FIXME: should be activated
+ //assert(authString != null);
+
+ SpeedyRequest sr = null;
+
+ if (body != null) {
+ if (headers != null) {
+ sr = new SpeedyRequest(httpMethod, uri, null, authString, body,
+ type, headers);
+ } else {
+ sr = new SpeedyRequest(httpMethod, uri, null, authString, body,
+ type);
+ }
+ } else {
+ if (headers != null) {
+ sr = new SpeedyRequest(httpMethod, uri, null, authString, null,
+ type, headers);
+ } else {
+ sr = new SpeedyRequest(httpMethod, uri, null, authString );
+ }
+ }
+ sr.setTimeout(timeout);
+ RPCResponse resp = new RPCResponse(sr,server);
+ sr.listener = resp;
+ synchronized (speedy) {
+ speedy.sendRequest(sr, server);
+ }
+ return resp;
+ }
+
+ public MultiSpeedy getSpeedy() {
+ return speedy;
+ }
+
+ /**Generates a HashMap from the arguments passed.
+ * e.g. generateMap("key1",value1,"key2",value2)
+ */
+ public static Map<String, Object> generateMap(Object ...args) {
+ if (args.length % 2 != 0) {
+ throw new IllegalArgumentException("require even number of arguments (key1,value1,key2,value2...)");
+ }
+ Map<String, Object> m = new HashMap<String, Object>(args.length/2);
+ for (int i = 0; i < args.length; i = i+2) {
+ m.put((String)args[i],args[i+1]);
+ }
+ return m;
+ }
+
+ /** Generates a list from the arguments passed.
+ */
+ public static List<Object> generateList(Object ...args) {
+ List<Object> l = new ArrayList<Object>(args.length);
+ for (int i = 0; i < args.length; i++) {
+ l.add(args[i]);
+ }
+ return l;
+ }
+
+ /** Generates a list from the string arguments passed.
+ */
+ public static List<String> generateStringList(String ...args) {
+ List<String> l = new ArrayList<String>(args.length);
+ for (int i = 0; i < args.length; i++) {
+ l.add(args[i]);
+ }
+ return l;
+ }
+
+ public void setTimeout(int timeout) {
+ this.timeout = timeout;
+ }
+
+ public int getTimeout() {
+ return this.timeout;
+ }
+
+ public static String createAuthResponseHeader(SpeedyRequest response,
+ String username, String password) {
+ //check header...
+
+ final String authRequestHeader = response.responseHeaders.getHeader(HTTPHeaders.HDR_WWWAUTH);
+ if ((authRequestHeader == null) || (authRequestHeader.length() == 0))
+ return null;
+
+ try {
+ System.out.println("header: "+authRequestHeader);
+
+
+ Pattern p = Pattern.compile("nonce=\\\"(\\S+)\\\"");
+ Matcher m = p.matcher(authRequestHeader);
+ m.find();
+ final String cNonce = m.group(1);
+
+
+ MessageDigest md5 = MessageDigest.getInstance("MD5");
+ md5.update((username+":xtreemfs:"+password).getBytes());
+ byte[] digest = md5.digest();
+ final String HA1 = OutputUtils.byteArrayToHexString(digest).toLowerCase();
+
+ md5.update((response.getMethod()+":"+response.getURI()).getBytes());
+ digest = md5.digest();
+ final String HA2 = OutputUtils.byteArrayToHexString(digest).toLowerCase();
+
+ md5.update((HA1+":"+cNonce+":"+HA2).getBytes());
+ digest = md5.digest();
+ return OutputUtils.byteArrayToHexString(digest).toLowerCase();
+ } catch (Exception ex) {
+ return null;
+ }
+ }
+}
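A sketch of a complete JSON call through this client; host, port, method name, parameters and authentication string are invented for illustration:

    import java.net.InetSocketAddress;
    import java.util.List;

    import org.xtreemfs.common.clients.RPCClient;
    import org.xtreemfs.common.clients.RPCResponse;

    public class RpcClientDemo {
        public static void main(String[] args) throws Exception {
            RPCClient client = new RPCClient(); // private MultiSpeedy instance
            client.setTimeout(5000);

            InetSocketAddress server = new InetSocketAddress("localhost", 32638);
            List<Object> params = RPCClient.generateList(
                    RPCClient.generateMap("type", "OSD", "free", 1024L));

            RPCResponse response = client.sendRPC(server, "someQueryMethod", params,
                    "nullauth", null);
            try {
                Object result = response.get(); // parsed JSON body, may be null
                System.out.println(result);
            } finally {
                response.freeBuffers();
                client.shutdown();
            }
        }
    }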
diff --git a/servers/src/org/xtreemfs/common/clients/RPCResponse.java b/servers/src/org/xtreemfs/common/clients/RPCResponse.java
new file mode 100644
index 0000000000000000000000000000000000000000..6c9bb9d3ee79a2114f1ec030ca1a0f95c8e9862a
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/RPCResponse.java
@@ -0,0 +1,301 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.clients;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.xtreemfs.common.buffer.ReusableBuffer;
+import org.xtreemfs.common.logging.Logging;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.json.JSONString;
+import org.xtreemfs.foundation.pinky.HTTPHeaders;
+import org.xtreemfs.foundation.pinky.HTTPUtils;
+import org.xtreemfs.foundation.speedy.MultiSpeedy;
+import org.xtreemfs.foundation.speedy.SpeedyRequest;
+import org.xtreemfs.foundation.speedy.SpeedyResponseListener;
+
+/**
+ * Response for an asynchronous RPC request.
+ *
+ * @author bjko
+ */
+public class RPCResponse<V> implements SpeedyResponseListener {
+
+ public static int MAX_AUTH_RETRY = 3;
+
+ protected InetSocketAddress targetServer;
+
+ protected RPCResponseListener listener;
+
+ /**
+ * The httpRequest used for sending this RPC via Speedy.
+ */
+ protected SpeedyRequest httpRequest;
+
+ /**
+ * Arbitrary attachment for continuations.
+ */
+ protected Object attachment;
+
+ protected AtomicBoolean finished;
+
+ protected String username, password;
+
+ protected int authRetryCount;
+
+ protected MultiSpeedy speedy;
+
+ /**
+ * Creates a new instance of RPCResponse
+ *
+ * @param request
+ * the request sent via Speedy
+ */
+ public RPCResponse(SpeedyRequest request, InetSocketAddress server) {
+ this.httpRequest = request;
+ this.targetServer = server;
+ finished = new AtomicBoolean(false);
+ }
+
+ /**
+ * Creates a new instance of RPCResponse with credentials for digest authentication
+ *
+ * @param request
+ * the request sent via Speedy
+ */
+ public RPCResponse(SpeedyRequest request, InetSocketAddress server, MultiSpeedy speedy, String username, String password) {
+ this(request,server);
+ this.username = username;
+ this.password = password;
+ this.authRetryCount = 0;
+ this.speedy = speedy;
+ }
+
+ /**
+ * Checks the status of the request.
+ *
+ * @return true, if the server response is available or the request
+ * has failed.
+ */
+ public boolean isDone() {
+ return this.finished.get();
+ }
+
+ /**
+ * Waits for the response if necessary and throws exceptions if the request
+ * did not succeed.
+ *
+ * If the server sent a response and a status code 200 (OK) the method
+ * returns. If another status code is returned, an HttpErrorException is
+ * thrown. If the server is not available or some other communication error
+ * occurs, an IO exception is thrown.
+ *
+ * @throws java.lang.InterruptedException
+ * if it is interrupted while waiting for the server's response.
+ * @throws org.xtreemfs.common.clients.HttpErrorException
+ * if the server returns a status code other than 200 (OK)
+ * @throws java.io.IOException
+ * if the server is not available or a communication error
+ * occurs
+ */
+ public void waitForResponse() throws InterruptedException,
+ HttpErrorException, IOException {
+ waitForResponse(0);
+ }
+
+ public void waitForResponse(long timeout) throws InterruptedException, HttpErrorException,
+ IOException {
+ synchronized (this) {
+ if (!isDone()) {
+ this.wait(timeout);
+ }
+ }
+ assert (httpRequest != null);
+ if (httpRequest.status == SpeedyRequest.RequestStatus.FINISHED) {
+ if (httpRequest.statusCode == HTTPUtils.SC_OKAY) {
+ return;
+ } else {
+ if ( (httpRequest.statusCode == HTTPUtils.SC_UNAUTHORIZED) &&
+ (username != null) && (this.authRetryCount > MAX_AUTH_RETRY)) {
+ //resend with authentication!
+ httpRequest.addDigestAuthentication(username, password);
+ this.authRetryCount++;
+ assert(httpRequest.listener == this);
+ Logging.logMessage(Logging.LEVEL_DEBUG, this,"resending request with digest authentication");
+ speedy.sendRequest(httpRequest, targetServer);
+ }
+ if (httpRequest.responseBody != null)
+ throw new HttpErrorException(httpRequest.statusCode, httpRequest.responseBody
+ .array());
+ else
+ throw new HttpErrorException(httpRequest.statusCode);
+ }
+ } else if (httpRequest.status == SpeedyRequest.RequestStatus.PENDING) {
+ throw new IOException("server " + targetServer + " is not available");
+ } else if (httpRequest.status == SpeedyRequest.RequestStatus.SENDING) {
+ throw new IOException("cannot establish connection to server " + targetServer);
+ } else if (httpRequest.status == SpeedyRequest.RequestStatus.WAITING) {
+ throw new IOException("server " + targetServer + " did not send a response");
+ } else {
+ throw new IOException("server " + targetServer + " is not available");
+ }
+ }
+
+ /**
+ * Retrieves the response body sent by the server. Waits for the server if
+ * necessary.
+ *
+ * If the server sent a response and a status code 200 (OK) the method
+ * returns. If another status code is returned, an HttpErrorException is
+ * thrown. If the server is not available or some other communication error
+ * occurs, an IO exception is thrown.
+ *
+ * @throws java.lang.InterruptedException
+ * if it is interrupted while waiting for the server's response.
+ * @throws org.xtreemfs.common.clients.HttpErrorException
+ * if the server returns a status code other than 200 (OK)
+ * @throws java.io.IOException
+ * if the server is not available or a communication error
+ * occurs
+ * @return the response body
+ */
+ public ReusableBuffer getBody() throws InterruptedException,
+ HttpErrorException, IOException {
+ waitForResponse();
+ return httpRequest.responseBody;
+ }
+
+ public V get() throws InterruptedException, HttpErrorException,
+ IOException, JSONException {
+
+ waitForResponse();
+ if (httpRequest.responseBody == null)
+ return null;
+
+ String body = new String(httpRequest.responseBody.array(),
+ HTTPUtils.ENC_UTF8);
+ Object o = JSONParser.parseJSON(new JSONString(body));
+ return (V) o;
+ }
+
+ /**
+ * Retrieves the response status code. Waits for the server's response if
+ * necessary.
+ *
+ * @return the status code
+ * @throws java.lang.InterruptedException
+ * if it is interrupted while waiting for the server's response.
+ * @throws org.xtreemfs.common.clients.HttpErrorException
+ * if the server returns a status code other than 200 (OK)
+ * @throws java.io.IOException
+ * if the server is not available or a communication error
+ * occurs
+ */
+ public int getStatusCode() throws InterruptedException, HttpErrorException,
+ IOException {
+ waitForResponse();
+ return httpRequest.statusCode;
+ }
+
+ /**
+ * Retrieves the response headers. Waits for the server's response if
+ * necessary.
+ *
+ * @throws java.lang.InterruptedException
+ * if it is interrupted while waiting for the server's response.
+ * @throws org.xtreemfs.common.clients.HttpErrorException
+ * if the server returns a status code other than 200 (OK)
+ * @throws java.io.IOException
+ * if the server is not available or a communication error
+ * occurs
+ * @return the response headers sent by the server
+ */
+ public HTTPHeaders getHeaders() throws InterruptedException,
+ HttpErrorException, IOException {
+ waitForResponse();
+ return httpRequest.responseHeaders;
+ }
+
+ public void receiveRequest(SpeedyRequest theRequest) {
+ // Logging.logMessage(Logging.LEVEL_ERROR,this,"EVENT: "+theRequest);
+
+ if (this.finished.getAndSet(true)) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this,
+ "RESPONSE ALREADY SET!");
+ throw new RuntimeException("response already sent!");
+ }
+
+ if (listener != null) {
+ listener.responseAvailable(this);
+ }
+
+ synchronized (this) {
+ this.notifyAll();
+ }
+ }
+
+
+
+ public void setResponseListener(RPCResponseListener listener) {
+ synchronized (this) {
+ this.listener = listener;
+ if (this.isDone())
+ listener.responseAvailable(this);
+ }
+ }
+
+ public void setAttachment(Object attachment) {
+ this.attachment = attachment;
+ }
+
+ public Object getAttachment() {
+ return this.attachment;
+ }
+
+ public SpeedyRequest getSpeedyRequest() {
+ return httpRequest;
+ }
+
+ public void freeBuffers() {
+ this.httpRequest.freeBuffer();
+ this.httpRequest = null;
+ }
+
+ protected void finalize() {
+ if (this.httpRequest != null) {
+ Logging.logMessage(Logging.LEVEL_DEBUG,this,"auto free for: "+this.httpRequest.responseHeaders);
+ freeBuffers();
+ }
+ }
+
+}
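The synchronous path (waitForResponse()/get()) is shown above; the listener hook enables fully asynchronous handling via the RPCResponseListener interface defined in the next file. A minimal sketch with invented server, method and credentials:

    import java.net.InetSocketAddress;

    import org.xtreemfs.common.clients.RPCClient;
    import org.xtreemfs.common.clients.RPCResponse;
    import org.xtreemfs.common.clients.RPCResponseListener;

    public class AsyncRpcDemo {
        public static void main(String[] args) throws Exception {
            RPCClient client = new RPCClient();
            RPCResponse response = client.sendRPC(new InetSocketAddress("localhost", 32636),
                    "someMethod", null, "nullauth", null);

            // invoked by speedy's thread as soon as the response (or an error) is available
            response.setResponseListener(new RPCResponseListener() {
                public void responseAvailable(RPCResponse r) {
                    try {
                        // the response is already done here, so this returns immediately
                        System.out.println("status: " + r.getStatusCode());
                    } catch (Exception ex) {
                        ex.printStackTrace();
                    } finally {
                        r.freeBuffers();
                    }
                }
            });
        }
    }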
diff --git a/servers/src/org/xtreemfs/common/clients/RPCResponseListener.java b/servers/src/org/xtreemfs/common/clients/RPCResponseListener.java
new file mode 100644
index 0000000000000000000000000000000000000000..69def1c4717d129641c7b03a90141d45f12b834b
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/RPCResponseListener.java
@@ -0,0 +1,35 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.clients;
+
+/**
+ *
+ * @author bjko
+ */
+public interface RPCResponseListener {
+
+ void responseAvailable(RPCResponse response);
+
+}
diff --git a/servers/src/org/xtreemfs/common/clients/dir/DIRClient.java b/servers/src/org/xtreemfs/common/clients/dir/DIRClient.java
new file mode 100644
index 0000000000000000000000000000000000000000..0fbd79aa25be06a52c814db8b0d756d592115c6c
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/dir/DIRClient.java
@@ -0,0 +1,244 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Björn Kolbeck (ZIB), Jan Stender (ZIB), Christian Lorenz (ZIB)
+ */
+
+package org.xtreemfs.common.clients.dir;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.xtreemfs.common.clients.HttpErrorException;
+import org.xtreemfs.common.clients.RPCClient;
+import org.xtreemfs.common.clients.RPCResponse;
+import org.xtreemfs.common.uuids.UnknownUUIDException;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.pinky.SSLOptions;
+import org.xtreemfs.foundation.speedy.MultiSpeedy;
+
+/**
+ *
+ * @author bjko
+ */
+public class DIRClient extends RPCClient {
+
+ public static final long TIMEOUT = 15000;
+
+ private final InetSocketAddress defaultServer;
+
+ public static final String HOMEDIR_PREFIX = "user-";
+
+ /** Creates a new instance of DIRClient */
+ public DIRClient(MultiSpeedy sharedSpeedy, InetSocketAddress defaultServer) throws IOException {
+ super(sharedSpeedy);
+ this.defaultServer = defaultServer;
+ }
+
+ /** Creates a new instance of DIRClient */
+ public DIRClient(MultiSpeedy sharedSpeedy, InetSocketAddress defaultServer, int timeout)
+ throws IOException {
+ super(sharedSpeedy, timeout);
+ this.defaultServer = defaultServer;
+ }
+
+ public DIRClient(InetSocketAddress defaultServer, SSLOptions sslOptions, int timeout)
+ throws IOException {
+ super(timeout, sslOptions);
+ this.defaultServer = defaultServer;
+ }
+
+ public RPCResponse registerEntity(InetSocketAddress server, String uuid,
+ Map data, long version, String authStr) throws IOException,
+ HttpErrorException, JSONException, InterruptedException {
+
+ List args = new ArrayList(2);
+ args.add(uuid);
+ args.add(data);
+ args.add(version);
+
+ RPCResponse r = sendRPC(server, "registerEntity", args, authStr, null);
+ return r;
+ }
+
+ public RPCResponse registerEntity(String uuid, Map data, long version,
+ String authStr) throws IOException, HttpErrorException, JSONException, InterruptedException {
+ return registerEntity(defaultServer, uuid, data, version, authStr);
+ }
+
+ public RPCResponse<Map<String, Map<String, Object>>> getEntities(InetSocketAddress server,
+ Map<String, Object> query, List<String> attrs, String authStr) throws IOException,
+ HttpErrorException, JSONException, InterruptedException {
+
+ List args = new ArrayList(2);
+ args.add(query);
+ args.add(attrs);
+
+ RPCResponse<Map<String, Map<String, Object>>> r = sendRPC(server, "getEntities", args,
+ authStr, null);
+
+ return r;
+ }
+
+ public RPCResponse<Map<String, Map<String, Object>>> getEntities(Map<String, Object> query,
+ List<String> attrs, String authStr) throws IOException, HttpErrorException, JSONException,
+ InterruptedException {
+ return getEntities(defaultServer, query, attrs, authStr);
+ }
+
+ public RPCResponse deregisterEntity(InetSocketAddress server, String uuid, String authStr)
+ throws IOException, HttpErrorException, JSONException, InterruptedException {
+
+ List args = new ArrayList(1);
+ args.add(uuid);
+
+ RPCResponse r = sendRPC(defaultServer, "deregisterEntity", args, authStr, null);
+
+ return r;
+ }
+
+ public RPCResponse deregisterEntity(String uuid, String authStr) throws IOException,
+ HttpErrorException, JSONException, InterruptedException {
+ return deregisterEntity(defaultServer, uuid, authStr);
+ }
+
+ public RPCResponse registerAddressMapping(InetSocketAddress server, String uuid,
+ List<Map<String, Object>> mapping, long version, String authStr) throws IOException,
+ HttpErrorException, JSONException, InterruptedException {
+
+ List args = new ArrayList(3);
+ args.add(uuid);
+ args.add(mapping);
+ args.add(version);
+
+ RPCResponse r = sendRPC(server, "registerAddressMapping", args, authStr, null);
+ return r;
+ }
+
+ public RPCResponse registerAddressMapping(String uuid, List<Map<String, Object>> mapping,
+ long version, String authStr) throws IOException, HttpErrorException, JSONException,
+ InterruptedException {
+ return registerAddressMapping(defaultServer, uuid, mapping, version, authStr);
+ }
+
+ public RPCResponse<Map<String, List<Object>>> getAddressMapping(
+ InetSocketAddress server, String uuid, String authStr) throws IOException,
+ HttpErrorException, JSONException, InterruptedException {
+
+ List args = new ArrayList(1);
+ args.add(uuid);
+
+ RPCResponse<Map<String, List<Object>>> r = sendRPC(server,
+ "getAddressMapping", args, authStr, null);
+ return r;
+ }
+
+ public RPCResponse<Map<String, List<Object>>> getAddressMapping(String uuid,
+ String authStr) throws IOException, HttpErrorException, JSONException, InterruptedException {
+ return getAddressMapping(defaultServer, uuid, authStr);
+ }
+
+ public RPCResponse deregisterAddressMapping(InetSocketAddress server, String uuid,
+ String authStr) throws IOException, HttpErrorException, JSONException, InterruptedException {
+
+ List args = new ArrayList(1);
+ args.add(uuid);
+
+ RPCResponse r = sendRPC(defaultServer, "deregisterAddressMapping", args, authStr, null);
+
+ return r;
+ }
+
+ public RPCResponse deregisterAddressMapping(String uuid, String authStr) throws IOException,
+ HttpErrorException, JSONException, InterruptedException {
+ return deregisterAddressMapping(defaultServer, uuid, authStr);
+ }
+
+ public RPCResponse getGlobalTime(InetSocketAddress server, String authStr)
+ throws IOException, HttpErrorException, JSONException, InterruptedException {
+
+ RPCResponse r = sendRPC(server, "getGlobalTime", new ArrayList(0), authStr, null);
+ return r;
+ }
+
+ public RPCResponse getGlobalTime(String authStr) throws IOException, HttpErrorException,
+ JSONException, InterruptedException {
+
+ return getGlobalTime(defaultServer, authStr);
+ }
+
+ /**
+ * Retrieves the volume URL for a user's home volume.
+ * @param globalUserId the user's global ID (GUID)
+ * @param authStr authentication string for the directory service
+ * @return the URL or null if the volume cannot be found
+ * @throws java.io.IOException
+ * @throws org.xtreemfs.common.clients.HttpErrorException
+ * @throws org.xtreemfs.foundation.json.JSONException
+ * @throws java.lang.InterruptedException
+ */
+ public String locateUserHome(String globalUserId, String authStr) throws IOException, HttpErrorException,
+ JSONException, InterruptedException {
+
+ Map qry = new HashMap();
+ qry.put("type","volume");
+ qry.put("name",HOMEDIR_PREFIX+globalUserId);
+ RPCResponse<Map<String, Map<String, Object>>> r = this.getEntities(qry, null, authStr);
+ Map<String, Map<String, Object>> map = r.get();
+
+ if (map.size() == 0)
+ return null;
+
+ String volname = null;
+ String mrcuuid = null;
+ for (String uuid : map.keySet()) {
+ Map data = map.get(uuid);
+ mrcuuid = (String) data.get("mrc");
+ volname = (String) data.get("name");
+ break;
+ }
+ if (mrcuuid == null)
+ return null;
+
+ RPCResponse<Map<String, List<Object>>> r2 = this.getAddressMapping(mrcuuid, authStr);
+ r2.waitForResponse(2000);
+ List<Object> l = r2.get().get(mrcuuid);
+ if ((l == null) || (l.size() < 2)) {
+ throw new UnknownUUIDException("MRC's uuid "+mrcuuid+" is not registered at directory server");
+ }
+ List<Map<String, Object>> mappings = (List<Map<String, Object>>) l.get(1);
+ for (int i = 0; i < mappings.size(); i++) {
+ Map addrMapping = mappings.get(i);
+ final String network = (String)addrMapping.get("match_network");
+ if (network.equals("*")) {
+ final String address = (String)addrMapping.get("address");
+ final String protocol = (String)addrMapping.get("protocol");
+ final int port = (int) ((Long)addrMapping.get("port")).intValue();
+ return protocol+"://"+address+":"+port+"/"+volname;
+ }
+ }
+ return null;
+ }
+}
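+
+/* Usage sketch: resolving a user's home volume URL, assuming a shared MultiSpeedy
+ instance, a reachable Directory Service at dirHost:dirPort (placeholder values)
+ and an authString obtained from an authentication provider.
+
+ DIRClient dir = new DIRClient(sharedSpeedy, new InetSocketAddress(dirHost, dirPort));
+ String volumeURL = dir.locateUserHome(globalUserId, authString);
+ // volumeURL has the form protocol://address:port/user-<globalUserId>, taken from the
+ // MRC address mapping registered at the DIR, or is null if no home volume exists
+*/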
diff --git a/servers/src/org/xtreemfs/common/clients/io/ByteMapper.java b/servers/src/org/xtreemfs/common/clients/io/ByteMapper.java
new file mode 100644
index 0000000000000000000000000000000000000000..0e2948ff6880ca03b0ba4b6cd2fd16ef2653a859
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/io/ByteMapper.java
@@ -0,0 +1,51 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Nele Andersen (ZIB), Björn Kolbeck (ZIB)
+ */
+
+
+package org.xtreemfs.common.clients.io;
+
+public interface ByteMapper {
+
+ /**
+ * reads data from file.
+ * @param data a buffer of at least (offset+length) bytes into which the data is read
+ * @param offset offset within buffer to write to
+ * @param length number of bytes to read
+ * @param filePosition offset within file
+ * @return the number of bytes read
+ * @throws java.lang.Exception
+ */
+ public int read(byte[] data, int offset, int length, long filePosition) throws Exception;
+
+ /**
+ * writes data to a file.
+ * @param data the data to write (the buffer must be at least offset+length bytes long).
+ * @param offset the position within the buffer to start at.
+ * @param length number of bytes to write
+ * @param filePosition the offset within the file
+ * @return the number of bytes written
+ * @throws java.lang.Exception
+ */
+ public int write(byte[] data, int offset, int length, long filePosition) throws Exception;
+}
diff --git a/servers/src/org/xtreemfs/common/clients/io/ByteMapperFactory.java b/servers/src/org/xtreemfs/common/clients/io/ByteMapperFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..6da1ed42fbeacb0ad66f7874837646f960dbc705
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/io/ByteMapperFactory.java
@@ -0,0 +1,34 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Nele Andersen (ZIB)
+ */
+
+
+package org.xtreemfs.common.clients.io;
+
+public class ByteMapperFactory {
+
+ public static ByteMapper createByteMapper(String policy, int stripeSize, ObjectStore store) {
+ // only the RAID0 striping policy is currently supported; the policy argument is ignored
+ return new ByteMapperRAID0(stripeSize, store);
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/clients/io/ByteMapperRAID0.java b/servers/src/org/xtreemfs/common/clients/io/ByteMapperRAID0.java
new file mode 100644
index 0000000000000000000000000000000000000000..d31189d42ccebf4c127ec5f9a9d9733e4e9097d4
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/io/ByteMapperRAID0.java
@@ -0,0 +1,149 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Nele Andersen (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common.clients.io;
+
+import java.io.IOException;
+
+import org.xtreemfs.common.buffer.BufferPool;
+import org.xtreemfs.common.buffer.ReusableBuffer;
+
+class ByteMapperRAID0 implements ByteMapper{
+
+ final int stripeSize;
+
+ ObjectStore objectStore;
+
+ public ByteMapperRAID0(int stripeSize, ObjectStore objectStore){
+ this.stripeSize = stripeSize;
+ this.objectStore = objectStore;
+ }
+
+ /**
+ *
+ * @param data - the buffer into which the data is read (must be at least offset+length bytes long).
+ * @param offset - the start offset within the buffer.
+ * @param length - the maximum number of bytes to read.
+ * @param filePosition - the offset within the file.
+ * @return the total number of bytes read into the buffer; may be less than
+ * length, or 0, if the end of the file has been reached.
+ * @throws Exception
+ */
+ public int read(byte[] data, int offset, int length, long filePosition) throws Exception{
+
+ if (data.length < offset+length)
+ throw new RuntimeException("buffer is too small!");
+
+ final int firstObject = (int) (filePosition / this.stripeSize);
+ assert(firstObject >= 0);
+
+ int lastObject = (int) ( (filePosition + ((long)length)) / this.stripeSize);
+ if (( (filePosition + ((long)length)) % this.stripeSize) == 0)
+ lastObject--;
+ assert(lastObject >= firstObject);
+
+ final int offsetInFirstObject = (int) (filePosition % this.stripeSize);
+ assert(offsetInFirstObject < stripeSize);
+ final int bytesInLastObject = (int) (((filePosition + length) % this.stripeSize) == 0 ? this.stripeSize :
+ ((filePosition + length) % this.stripeSize));
+ assert(bytesInLastObject > 0);
+ assert(bytesInLastObject <= stripeSize);
+
+ int bytesRead = 0;
+ for (int obj = firstObject; obj <= lastObject; obj++) {
+
+ int bytesToRead = this.stripeSize;
+ int objOffset = 0;
+
+ if (obj == firstObject)
+ objOffset = offsetInFirstObject;
+ if (obj == lastObject)
+ bytesToRead = bytesInLastObject;
+
+ assert(bytesToRead > 0);
+ assert(objOffset >= 0);
+
+ ReusableBuffer rb = objectStore.readObject(obj, objOffset, bytesToRead);
+ assert(offset+bytesRead <= data.length);
+ if (rb == null) {
+ //EOF!
+ break;
+ }
+ if (rb.capacity() < bytesToRead) {
+ //EOF!
+ rb.get(data, offset+bytesRead,rb.capacity());
+ bytesRead += rb.capacity();
+ BufferPool.free(rb);
+ break;
+ }
+ rb.get(data, offset+bytesRead, bytesToRead);
+ bytesRead += bytesToRead;
+ BufferPool.free(rb);
+ }
+ return bytesRead;
+
+ }
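+
+ /* Worked example of the index arithmetic above (values are illustrative):
+ with stripeSize = 65536, filePosition = 130000 and length = 10000,
+ firstObject = 130000 / 65536 = 1,
+ lastObject = 140000 / 65536 = 2 (140000 % 65536 != 0, so no decrement),
+ offsetInFirstObject = 130000 % 65536 = 64464,
+ bytesInLastObject = 140000 % 65536 = 8928;
+ object 1 contributes 65536 - 64464 = 1072 bytes, object 2 the remaining
+ 8928 bytes, and 1072 + 8928 = 10000 = length. */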
+
+ public int write(byte[] data, int offset, int length, long filePosition) throws Exception{
+
+ final int firstObject = (int) (filePosition / this.stripeSize);
+ int lastObject = (int) ( (filePosition + ((long)length)) / this.stripeSize);
+ if (( (filePosition + ((long)length)) % this.stripeSize) == 0)
+ lastObject--;
+
+ final int offsetInFirstObject = (int) (filePosition % this.stripeSize);
+
+
+ int bytesInLastObject = -1;
+ if (firstObject == lastObject) {
+ bytesInLastObject = length;
+ } else {
+ if (((filePosition + length) % this.stripeSize) == 0) {
+ bytesInLastObject = this.stripeSize;
+ } else {
+ bytesInLastObject = (int)((filePosition + length) % this.stripeSize);
+ }
+ }
+
+
+ int bytesWritten = 0;
+ for (int obj = firstObject; obj <= lastObject; obj++) {
+
+ int bytesToWrite = this.stripeSize;
+ int objOffset = 0;
+
+ if (obj == firstObject) {
+ objOffset = offsetInFirstObject;
+ bytesToWrite = this.stripeSize - offsetInFirstObject;
+ }
+ if (obj == lastObject)
+ bytesToWrite = bytesInLastObject;
+
+ ReusableBuffer view = ReusableBuffer.wrap(data, offset+bytesWritten, bytesToWrite);
+ objectStore.writeObject(objOffset, obj, view);
+ bytesWritten += bytesToWrite;
+ }
+ return bytesWritten;
+
+ }
+
+}
\ No newline at end of file
diff --git a/servers/src/org/xtreemfs/common/clients/io/ObjectStore.java b/servers/src/org/xtreemfs/common/clients/io/ObjectStore.java
new file mode 100644
index 0000000000000000000000000000000000000000..9c5d21f7de37b90512cc358742f1b7a335edbf91
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/io/ObjectStore.java
@@ -0,0 +1,51 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Nele Andersen (ZIB)
+ */
+
+package org.xtreemfs.common.clients.io;
+
+import java.io.IOException;
+
+import org.xtreemfs.common.buffer.ReusableBuffer;
+import org.xtreemfs.common.clients.HttpErrorException;
+import org.xtreemfs.foundation.json.JSONException;
+
+public interface ObjectStore {
+
+ /**
+ * read an object from an OSD.
+ * @param objectNo object number (0 is the first object in a file)
+ * @param offset offset within the object
+ * @param length number of bytes to read
+ * @return the data read. In case of an EOF the buffer's length will be smaller than requested!
+ * @throws java.io.IOException
+ * @throws org.xtreemfs.foundation.json.JSONException
+ * @throws java.lang.InterruptedException
+ * @throws org.xtreemfs.common.clients.HttpErrorException
+ */
+ ReusableBuffer readObject(long objectNo, long offset, long length) throws IOException,
+ JSONException, InterruptedException, HttpErrorException;
+
+ void writeObject(long offset, long objectNo, ReusableBuffer buffer) throws IOException,
+ JSONException, InterruptedException, HttpErrorException;
+}
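+
+/* Usage sketch, assuming an ObjectStore implementation such as
+ org.xtreemfs.common.clients.io.RandomAccessFile and a known stripe size:
+
+ ReusableBuffer buf = store.readObject(objectNo, 0, stripeSize);
+ if (buf == null || buf.capacity() < stripeSize) {
+ // the end of the file was reached inside (or before) this object
+ }
+ // ... consume buf, then release it, e.g. via BufferPool.free(buf)
+*/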
diff --git a/servers/src/org/xtreemfs/common/clients/io/RandomAccessFile.java b/servers/src/org/xtreemfs/common/clients/io/RandomAccessFile.java
new file mode 100644
index 0000000000000000000000000000000000000000..73e864b917555a05893029e39d09110c11a71f99
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/io/RandomAccessFile.java
@@ -0,0 +1,386 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Nele Andersen (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common.clients.io;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.util.List;
+import java.util.Map;
+
+import org.xtreemfs.common.Capability;
+import org.xtreemfs.common.auth.NullAuthProvider;
+import org.xtreemfs.common.buffer.ReusableBuffer;
+import org.xtreemfs.common.clients.HttpErrorException;
+import org.xtreemfs.common.clients.RPCResponse;
+import org.xtreemfs.common.clients.mrc.MRCClient;
+import org.xtreemfs.common.clients.osd.OSDClient;
+import org.xtreemfs.common.logging.Logging;
+import org.xtreemfs.common.striping.Location;
+import org.xtreemfs.common.striping.Locations;
+import org.xtreemfs.common.striping.StripingPolicy;
+import org.xtreemfs.common.uuids.ServiceUUID;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.json.JSONString;
+import org.xtreemfs.foundation.pinky.HTTPHeaders;
+import org.xtreemfs.foundation.speedy.MultiSpeedy;
+import org.xtreemfs.utils.utils;
+
+public class RandomAccessFile implements ObjectStore {
+
+ private MultiSpeedy speedy;
+
+ private MRCClient mrcClient;
+
+ private OSDClient osdClient;
+
+ private Capability capability;
+
+ private Locations locations;
+
+ private Location selectedReplica;
+
+ private StripingPolicy selectedReplicaStripingPolicy;
+
+ private List<ServiceUUID> selectedReplicaOSDs;
+
+ private int selectedReplicaStripeSize;
+
+ private String fileId;
+
+ private String pathName;
+
+ private InetSocketAddress mrcAddress;
+
+ private String authString;
+
+ private ByteMapper byteMapper;
+
+ private String newFileSizeHdr;
+
+ private long filePos;
+
+ private Map<String, String> capAndXLoc;
+
+ private long capTime;
+
+ public RandomAccessFile(String mode, InetSocketAddress mrcAddress, String pathName,
+ MultiSpeedy speedy, String authString, StripingPolicy spolicy) throws Exception {
+
+ this.speedy = speedy;
+ this.pathName = pathName;
+ this.mrcAddress = mrcAddress;
+ this.authString = authString;
+
+ if (speedy == null)
+ Logging.logMessage(Logging.LEVEL_DEBUG, this, "speedy is null");
+
+ // use the shared speedy to create an MRC and OSD client
+ mrcClient = new MRCClient(speedy, 30000);
+ osdClient = new OSDClient(speedy, 30000);
+
+ // create a new file if necessary
+ try {
+ if (mode.contains("c")) {
+ mode = "w";
+ mrcClient.createFile(mrcAddress, pathName, authString);
+ }
+ } catch (HttpErrorException ex) {
+ // ignore the error; the file may already exist
+ }
+
+ capAndXLoc = mrcClient.open(mrcAddress, pathName, mode, authString);
+
+ // set and read striping policy
+ locations = new Locations(new JSONString(capAndXLoc.get(HTTPHeaders.HDR_XLOCATIONS)));
+ capability = new Capability(capAndXLoc.get(HTTPHeaders.HDR_XCAPABILITY));
+
+ capTime = System.currentTimeMillis();
+
+ setReplicaNo(0);
+
+ fileId = capability.getFileId();
+ newFileSizeHdr = null;
+ filePos = 0;
+ }
+
+ public RandomAccessFile(String mode, InetSocketAddress mrcAddress, String pathName,
+ MultiSpeedy speedy, String authString) throws Exception {
+ this(mode, mrcAddress, pathName, speedy, authString, null);
+ }
+
+ public RandomAccessFile(String mode, InetSocketAddress mrcAddress, String pathName,
+ MultiSpeedy speedy) throws Exception {
+ this(mode, mrcAddress, pathName, speedy, NullAuthProvider.createAuthString(System
+ .getProperty("user.name"), MRCClient.generateStringList(System
+ .getProperty("user.name"))));
+ }
+
+ public RandomAccessFile(String mode, URL mrcURL, String pathName, MultiSpeedy speedy)
+ throws Exception {
+ this(mode, new InetSocketAddress(mrcURL.getHost(), mrcURL.getPort()), pathName, speedy);
+ }
+
+ public RandomAccessFile(String mode, String pathName, MultiSpeedy speedy) throws Exception {
+ this(mode, new URL(utils.getxattr(pathName, "xtreemfs.url")), pathName, speedy);
+ }
+
+ /**
+ *
+ * @param resultBuffer
+ * - the buffer into which the data is read.
+ * @param offset
+ * - the start offset of the data.
+ * @param bytesToRead
+ * - the maximum number of bytes read.
+ * @return - the total number of bytes read into the buffer, or 0 if there
+ * is no more data because the end of the file has been reached.
+ * @throws Exception
+ * @throws IOException
+ */
+ public int read(byte[] resultBuffer, int offset, int bytesToRead) throws Exception {
+
+ int tmp = byteMapper.read(resultBuffer, offset, bytesToRead, filePos);
+ filePos += tmp;
+ return tmp;
+ }
+
+ /**
+ *
+ * @param objectNo
+ * - relative object number.
+ * @return the number of bytes in the object
+ * @throws HttpErrorException
+ * @throws IOException
+ * @throws JSONException
+ * @throws InterruptedException
+ */
+ public int readObject(int objectNo) throws HttpErrorException, IOException, JSONException,
+ InterruptedException {
+
+ // check whether capability needs to be renewed
+ checkCap();
+
+ RPCResponse response = null;
+
+ try {
+ int current_osd_index = selectedReplicaStripingPolicy.getOSDByObject(objectNo);
+ InetSocketAddress current_osd_address = selectedReplicaOSDs.get(current_osd_index)
+ .getAddress();
+
+ response = osdClient.get(current_osd_address, locations, capability, fileId, objectNo);
+
+ String header = response.getHeaders().getHeader(HTTPHeaders.HDR_XINVALIDCHECKSUM);
+ if (header != null && header.equalsIgnoreCase("true"))
+ throw new IOException("object " + objectNo + " has an invalid checksum");
+
+ ReusableBuffer data = response.getBody();
+ if (data == null)
+ return 0;
+
+ data.flip();
+
+ return data.limit();
+ } finally {
+ if (response != null)
+ response.freeBuffers();
+ }
+ }
+
+ /**
+ *
+ * @param objectNo
+ * - relative object number.
+ * @param firstByteInObject
+ * - the first byte to be read.
+ * @param bytesInObject
+ * - the maximal number of bytes to be read.
+ * @return a ReusableBuffer containing the data which was read.
+ */
+ public ReusableBuffer readObject(long objectNo, long firstByteInObject, long bytesInObject)
+ throws IOException, JSONException, InterruptedException, HttpErrorException {
+
+ int current_osd_index = selectedReplicaStripingPolicy.getOSDByObject(objectNo);
+ InetSocketAddress current_osd_address = selectedReplicaOSDs.get(current_osd_index)
+ .getAddress();
+
+ RPCResponse response = osdClient.get(current_osd_address, locations, capability, fileId,
+ objectNo, firstByteInObject, bytesInObject - 1);
+
+ ReusableBuffer data = response.getBody();
+ if (data == null) {
+ return null;
+ }
+ data.flip();
+ return data;
+ }
+
+ /**
+ * Writes bytesToWrite bytes from the writeFromBuffer byte array starting at
+ * offset to this file.
+ *
+ * @param writeFromBuffer
+ * @param offset
+ * @param bytesToWrite
+ * @return the number of bytes written
+ * @throws Exception
+ */
+ public int write(byte[] writeFromBuffer, int offset, int bytesToWrite) throws Exception {
+
+ int tmp = byteMapper.write(writeFromBuffer, offset, bytesToWrite, filePos);
+ filePos += tmp;
+ return tmp;
+ }
+
+ /**
+ * Writes a buffer to a single object on the responsible OSD.
+ *
+ * @param firstByteInObject
+ * - the start offset within the object
+ * @param objectNo
+ * - the relative object number
+ * @param data
+ * - the data to be written
+ */
+ public void writeObject(long firstByteInObject, long objectNo, ReusableBuffer data)
+ throws IOException, JSONException, InterruptedException, HttpErrorException {
+
+ // check whether capability needs to be renewed
+ checkCap();
+
+ int current_osd_index = selectedReplicaStripingPolicy.getOSDByObject(objectNo);
+ InetSocketAddress current_osd_address = selectedReplicaOSDs.get(current_osd_index)
+ .getAddress();
+
+ RPCResponse response = osdClient.put(current_osd_address, locations, capability, fileId,
+ objectNo, firstByteInObject, data);
+
+ response.waitForResponse();
+ final String tmp = response.getHeaders().getHeader(HTTPHeaders.HDR_XNEWFILESIZE);
+ if (tmp != null)
+ newFileSizeHdr = tmp;
+ }
+
+ public String getStripingPolicy() {
+ return selectedReplica.getStripingPolicy().toString();
+ }
+
+ public long getStripeSize() {
+ // the stripe size of a file is constant.
+ return selectedReplicaStripingPolicy.getStripeSize(0);
+ }
+
+ public long getStripeSize(long objectNo) {
+ return selectedReplicaStripingPolicy.getStripeSize(objectNo);
+ }
+
+ public List getOSDs() {
+ return selectedReplicaOSDs;
+ }
+
+ public long length() throws Exception {
+ return (Long) mrcClient.stat(mrcAddress, pathName, false, true, false, authString).get(
+ "size");
+ }
+
+ public long noOfObjects() throws Exception {
+ return (length() / selectedReplicaStripeSize) + 1;
+ }
+
+ public ServiceUUID getOSDId(long objectNo) {
+ long osd = selectedReplicaStripingPolicy.getOSDByObject(objectNo);
+ return selectedReplicaOSDs.get((int) osd);
+ }
+
+ public Locations getLocations() {
+ return locations;
+ }
+
+ public Capability getCapability() {
+ return capability;
+ }
+
+ public String getFileId() {
+ return fileId;
+ }
+
+ public String getPath() {
+ return pathName;
+ }
+
+ public void seek(long pos) {
+ filePos = pos;
+ }
+
+ public long getFilePointer() {
+ return filePos;
+ }
+
+ public void flush() throws Exception {
+ if (newFileSizeHdr != null)
+ this.mrcClient.updateFileSize(mrcAddress, capability.toString(), newFileSizeHdr,
+ authString);
+ }
+
+ public void delete() throws Exception {
+ mrcClient.delete(mrcAddress, pathName, authString);
+ RPCResponse r = osdClient.delete(selectedReplicaOSDs.get(0).getAddress(), locations,
+ capability, fileId);
+ r.waitForResponse();
+ }
+
+ public void finalize() {
+ if (speedy == null) {
+ mrcClient.shutdown();
+ osdClient.shutdown();
+ }
+ }
+
+ private void setReplicaNo(int no) {
+
+ selectedReplica = locations.getLocation(no);
+ selectedReplicaStripingPolicy = selectedReplica.getStripingPolicy();
+ selectedReplicaOSDs = selectedReplica.getOSDs();
+ selectedReplicaStripeSize = (int) selectedReplicaStripingPolicy.getStripeSize(0);
+
+ byteMapper = ByteMapperFactory.createByteMapper(selectedReplicaStripingPolicy
+ .getPolicyName(), selectedReplicaStripeSize, this);
+ }
+
+ private void checkCap() throws IOException {
+
+ long time = System.currentTimeMillis();
+
+ if (time - capTime > (Capability.DEFAULT_VALIDITY - 60) * 1000) {
+ try {
+ capAndXLoc = mrcClient.renew(mrcAddress, capAndXLoc, authString);
+ capTime = time;
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+ }
+ }
+
+}
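+
+/* Usage sketch: creating, writing and reading back a file, assuming a running MRC
+ at mrcHost:mrcPort (placeholder values), a shared MultiSpeedy instance and the
+ default NullAuthProvider credentials used by the three-argument constructor.
+
+ RandomAccessFile out = new RandomAccessFile("c", new InetSocketAddress(mrcHost, mrcPort),
+ "myVolume/test.txt", sharedSpeedy);
+ byte[] data = "hello".getBytes();
+ out.write(data, 0, data.length);
+ out.flush(); // propagates the new file size to the MRC
+
+ RandomAccessFile in = new RandomAccessFile("r", new InetSocketAddress(mrcHost, mrcPort),
+ "myVolume/test.txt", sharedSpeedy);
+ byte[] buf = new byte[data.length];
+ int n = in.read(buf, 0, buf.length);
+*/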
diff --git a/servers/src/org/xtreemfs/common/clients/mrc/MRCClient.java b/servers/src/org/xtreemfs/common/clients/mrc/MRCClient.java
new file mode 100644
index 0000000000000000000000000000000000000000..d4d72ecf5178f418685af33885124e5de2bc7345
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/mrc/MRCClient.java
@@ -0,0 +1,622 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common.clients.mrc;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.xtreemfs.common.clients.RPCClient;
+import org.xtreemfs.common.clients.RPCResponse;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.pinky.HTTPHeaders;
+import org.xtreemfs.foundation.pinky.SSLOptions;
+import org.xtreemfs.foundation.speedy.MultiSpeedy;
+
+/**
+ * A client for the MRC. Can be used as a generic client for all JSON-WP34-RPC
+ * calls. Supports sync and async RPCs.
+ *
+ * @author bjko
+ */
+public class MRCClient extends RPCClient {
+
+ /**
+ * Creates a new instance of MRCClient
+ *
+ * @param sharedSpeedy
+ * the MultiSpeedy instance shared with other clients
+ * @throws java.io.IOException
+ */
+ public MRCClient(MultiSpeedy sharedSpeedy) throws IOException {
+ super(sharedSpeedy);
+ }
+
+ public MRCClient(MultiSpeedy sharedSpeedy, int timeout) throws IOException {
+ super(sharedSpeedy, timeout);
+ }
+
+ public MRCClient() throws IOException {
+ this(null);
+ }
+
+ public MRCClient(int timeout, SSLOptions sslOptions) throws IOException {
+ super(timeout, sslOptions);
+ }
+
+ public void setACLEntries(InetSocketAddress server, String path,
+ Map entries, String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "setACLEntries", RPCClient.generateList(path,
+ entries), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void setXAttrs(InetSocketAddress server, String path,
+ Map attrs, String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "setXAttrs", RPCClient
+ .generateList(path, attrs), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void addReplica(InetSocketAddress server, String fileId,
+ Map stripingPolicy, List osdList,
+ String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "addReplica", RPCClient.generateList(fileId,
+ stripingPolicy, osdList), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void removeReplica(InetSocketAddress server, String fileId,
+ Map stripingPolicy, List osdList,
+ String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "removeReplica", RPCClient.generateList(fileId,
+ stripingPolicy, osdList), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void changeAccessMode(InetSocketAddress server, String path,
+ long mode, String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "changeAccessMode", RPCClient.generateList(
+ path, mode), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void changeOwner(InetSocketAddress server, String path,
+ String userId, String groupId, String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "changeOwner", RPCClient.generateList(path,
+ userId, groupId), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public boolean checkAccess(InetSocketAddress server, String path,
+ String mode, String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "checkAccess", RPCClient.generateList(path,
+ mode), authString, null);
+ return (Boolean) r.get();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ /**
+ * Asks the MRC to analyze which of the given file IDs exist in the volume.
+ * Example result: {001001111...}, where 1 means the file exists, 0 means it
+ * does not, and a single 2 is returned if the whole volume does not exist.
+ *
+ * @param server
+ * @param volumeID
+ * @param fileList
+ * @param authString
+ *
+ * @return MRC Response
+ *
+ * @throws JSONException
+ * @throws IOException
+ */
+ public RPCResponse checkFileList(InetSocketAddress server, String volumeID,
+ List fileList, String authString) throws IOException, JSONException {
+
+ RPCResponse r = sendRPC(server, "checkFileList",
+ RPCClient.generateList(volumeID, fileList),
+ authString, null);
+ return r;
+ }
+
+ public void createDir(InetSocketAddress server, String dirPath,
+ String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "createDir", RPCClient.generateList(dirPath),
+ authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void createDir(InetSocketAddress server, String dirPath,
+ Map attrs, long accessMode, String authString)
+ throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "createDir", RPCClient.generateList(dirPath,
+ attrs, accessMode), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void createFile(InetSocketAddress server, String filePath,
+ String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "createFile", RPCClient.generateList(filePath),
+ authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void createFile(InetSocketAddress server, String filePath,
+ Map attrs, Map stripingPolicy,
+ long accessMode, String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "createFile", RPCClient.generateList(filePath,
+ attrs, stripingPolicy, accessMode), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public Map createFile(InetSocketAddress server,
+ String filePath, Map attrs,
+ Map stripingPolicy, long accessMode, boolean open,
+ String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "createFile", RPCClient.generateList(filePath,
+ attrs, stripingPolicy, accessMode, open), authString, null);
+
+ return toXCapMap(r.getHeaders());
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ /**
+ * Restore the MetaData for the given fileID.
+ *
+ * @param server
+ * @param filePath
+ * @param fileID
+ * @param fileSize
+ * @param xAttrs
+ * @param authString
+ * @param osd
+ * @param objectSize
+ * @throws Exception
+ */
+ public void restoreFile(InetSocketAddress server, String filePath, long fileID, long fileSize, Map xAttrs,
+ String authString,String osd, long objectSize, String volumeID)
+ throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "restoreFile", RPCClient.generateList(filePath,
+ fileID, fileSize, xAttrs, osd, objectSize, volumeID), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void createLink(InetSocketAddress server, String linkPath,
+ String targetPath, String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "createLink", RPCClient.generateList(linkPath,
+ targetPath), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void createSymbolicLink(InetSocketAddress server, String linkPath,
+ String targetPath, String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "createSymbolicLink", RPCClient.generateList(
+ linkPath, targetPath), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void createVolume(InetSocketAddress server, String volumeName,
+ String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "createVolume", RPCClient
+ .generateList(volumeName), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void createVolume(InetSocketAddress server, String volumeName,
+ long osdSelectionPolicyId, Map stripingPolicy,
+ long acPolicyId, long partitioningPolicyId, Map acl,
+ String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "createVolume", RPCClient.generateList(
+ volumeName, osdSelectionPolicyId, stripingPolicy, acPolicyId,
+ partitioningPolicyId, acl), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void delete(InetSocketAddress server, String path, String authString)
+ throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "delete", RPCClient.generateList(path),
+ authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void deleteVolume(InetSocketAddress server, String name,
+ String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "deleteVolume", RPCClient.generateList(name),
+ authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public Map getLocalVolumes(InetSocketAddress server,
+ String authString) throws Exception {
+
+ RPCResponse<Map<String, String>> r = null;
+ try {
+ r = sendRPC(server, "getLocalVolumes", RPCClient.generateList(),
+ authString, null);
+ return r.get();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public Map getServerConfiguration(InetSocketAddress server,
+ String authString) throws Exception {
+
+ RPCResponse<Map<String, String>> r = null;
+ try {
+ r = sendRPC(server, "getServerConfiguration", RPCClient.generateList(),
+ authString, null);
+ return r.get();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void initFileSystem(InetSocketAddress server, String authString)
+ throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "initFileSystem", RPCClient.generateList(),
+ authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public Map move(InetSocketAddress server,
+ String sourcePath, String targetPath, String authString)
+ throws Exception {
+
+ RPCResponse<Map<String, String>> r = null;
+ try {
+ r = sendRPC(server, "move", RPCClient.generateList(sourcePath,
+ targetPath), authString, null);
+ return toXCapMap(r.getHeaders());
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public Map open(InetSocketAddress server, String path,
+ String accessMode, String authString) throws Exception {
+
+ RPCResponse<Map<String, String>> r = null;
+ try {
+ r = sendRPC(server, "open", RPCClient
+ .generateList(path, accessMode), authString, null);
+ return toXCapMap(r.getHeaders());
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public List query(InetSocketAddress server, String path,
+ String queryString, String authString) throws Exception {
+
+ RPCResponse<List<String>> r = null;
+ try {
+ r = sendRPC(server, "query", RPCClient.generateList(path,
+ queryString), authString, null);
+ return r.get();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public List readDir(InetSocketAddress server, String path,
+ String authString) throws Exception {
+
+ RPCResponse<List<String>> r = null;
+ try {
+ r = sendRPC(server, "readDir", RPCClient.generateList(path),
+ authString, null);
+ return r.get();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public Map<String, Map<String, Object>> readDirAndStat(
+ InetSocketAddress server, String path, String authString)
+ throws Exception {
+
+ RPCResponse<Map<String, Map<String, Object>>> r = null;
+ try {
+ r = sendRPC(server, "readDirAndStat", RPCClient.generateList(path),
+ authString, null);
+ return r.get();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void removeACLEntries(InetSocketAddress server, String path,
+ List entities, String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "removeACLEntries", RPCClient.generateList(
+ path, entities), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void removeXAttrs(InetSocketAddress server, String path,
+ List attrKeys, String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "removeXAttrs", RPCClient.generateList(path,
+ attrKeys), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public Map renew(InetSocketAddress server,
+ Map capability, String authString) throws Exception {
+
+ RPCResponse<Map<String, String>> r = null;
+ try {
+ r = sendRPC(server, "renew", RPCClient.generateList(), authString,
+ toHTTPHeaders(capability));
+ return toXCapMap(r.getHeaders());
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public void updateFileSize(InetSocketAddress server, String capability,
+ String newFileSizeHeader, String authString) throws Exception {
+
+ Map headers = new HashMap();
+ headers.put(HTTPHeaders.HDR_XCAPABILITY, capability);
+ headers.put(HTTPHeaders.HDR_XNEWFILESIZE, newFileSizeHeader);
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "updateFileSize", RPCClient.generateList(),
+ authString, toHTTPHeaders(headers));
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public Map stat(InetSocketAddress server, String path,
+ boolean inclReplicas, boolean inclXAttrs, boolean inclACLs,
+ String authString) throws Exception {
+
+ RPCResponse<Map<String, Object>> r = null;
+ try {
+ r = sendRPC(server, "stat", RPCClient.generateList(path,
+ inclReplicas, inclXAttrs, inclACLs), authString, null);
+ return r.get();
+ } finally {
+ if (r != null)
+ r.freeBuffers();
+ }
+ }
+
+ public String getXAttr(InetSocketAddress server, String path, String key,
+ String authString) throws Exception {
+
+ RPCResponse<String> r = null;
+ try {
+ r = sendRPC(server, "getXAttr", RPCClient.generateList(path, key),
+ authString, null);
+ return r.get();
+ } finally {
+ if (r != null)
+ r.freeBuffers();
+ }
+ }
+
+ public void setDefaultStripingPolicy(InetSocketAddress server, String path,
+ Map stripingPolicy, String authString) throws Exception {
+
+ RPCResponse r = null;
+ try {
+ r = sendRPC(server, "setDefaultStripingPolicy", RPCClient
+ .generateList(path, stripingPolicy), authString, null);
+ r.waitForResponse();
+ } finally {
+ r.freeBuffers();
+ }
+ }
+
+ public Map getDefaultStripingPolicy(
+ InetSocketAddress server, String path, String authString)
+ throws Exception {
+
+ RPCResponse<Map<String, Object>> r = null;
+ try {
+ r = sendRPC(server, "getDefaultStripingPolicy", RPCClient
+ .generateList(path), authString, null);
+ return r.get();
+ } finally {
+ if (r != null)
+ r.freeBuffers();
+ }
+ }
+
+ public long getProtocolVersion(InetSocketAddress server, List versions,
+ String authString) throws Exception {
+
+ RPCResponse<Long> r = null;
+ try {
+ r = sendRPC(server, "getDefaultStripingPolicy", RPCClient
+ .generateList(versions), authString, null);
+ return r.get();
+ } finally {
+ if (r != null)
+ r.freeBuffers();
+ }
+ }
+
+ private static HTTPHeaders toHTTPHeaders(Map hdrs) {
+
+ HTTPHeaders headers = new HTTPHeaders();
+ for (String key : hdrs.keySet())
+ headers.addHeader(key, hdrs.get(key));
+
+ return headers;
+ }
+
+ private static Map toXCapMap(HTTPHeaders hdrs) {
+
+ Map map = new HashMap();
+
+ if (hdrs.getHeader(HTTPHeaders.HDR_XCAPABILITY) != null)
+ map.put(HTTPHeaders.HDR_XCAPABILITY, hdrs
+ .getHeader(HTTPHeaders.HDR_XCAPABILITY));
+ if (hdrs.getHeader(HTTPHeaders.HDR_XLOCATIONS) != null)
+ map.put(HTTPHeaders.HDR_XLOCATIONS, hdrs
+ .getHeader(HTTPHeaders.HDR_XLOCATIONS));
+
+ return map;
+ }
+
+}
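+
+/* Usage sketch: creating a volume and a file on the MRC, assuming a reachable MRC
+ at mrcHost:mrcPort (placeholder values), a shared MultiSpeedy instance and an
+ authString obtained from an authentication provider.
+
+ MRCClient mrc = new MRCClient(sharedSpeedy, 30000);
+ InetSocketAddress mrcAddr = new InetSocketAddress(mrcHost, mrcPort);
+ mrc.createVolume(mrcAddr, "myVolume", authString);
+ mrc.createFile(mrcAddr, "myVolume/test.txt", authString);
+ Map xcap = mrc.open(mrcAddr, "myVolume/test.txt", "w", authString);
+ // xcap holds the X-Capability and X-Locations headers needed to access the OSDs
+*/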
diff --git a/servers/src/org/xtreemfs/common/clients/mrc/MRCClientInterface.java b/servers/src/org/xtreemfs/common/clients/mrc/MRCClientInterface.java
new file mode 100644
index 0000000000000000000000000000000000000000..6366feac77f48b29783f72222b48f064b3247b7e
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/mrc/MRCClientInterface.java
@@ -0,0 +1,718 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common.clients.mrc;
+
+import java.util.List;
+import java.util.Map;
+
+import org.xtreemfs.mrc.MRCRequest;
+import org.xtreemfs.mrc.brain.BrainException;
+import org.xtreemfs.mrc.brain.UserException;
+
+/**
+ * The interface to the MRC backend.
+ *
+ * @author stender, bjko
+ *
+ */
+public interface MRCClientInterface {
+
+ /**
+ * Sets up a new file system. The following steps are taken:
+ *
+ * <ul>
+ * <li>all local volumes are deregistered from the directory service</li>
+ * <li>the database containing local metadata is stopped and all contents
+ * are deleted</li>
+ * <li>an empty database for local metadata is started, representing an
+ * empty file system</li>
+ * </ul>
+ *
+ *
+ * @throws BrainException
+ * if the deregistration at the Directory Service of existing
+ * volumes failed or if an error occured in the storage backend
+ */
+ public void initFileSystem() throws BrainException;
+
+ /**
+ * Locally creates a new volume with the default OSD, striping and file
+ * access policy without an ACL and registers the volume at the Directory
+ * Service. This method is equivalent to
+ * createVolume(volumeName, null, userId, SimpleSelectionPolicy.POLICY_ID).
+ *
+ * @param volumeName
+ * the name for the new volume
+ * @param userId
+ * the user id
+ * @throws UserException
+ * if the volume already exists
+ * @throws BrainException
+ * if an error occured in the storage backend
+ * @see #createVolume(String, Map, long, long, long, long)
+ */
+ public void createVolume(String volumeName, long userId)
+ throws BrainException, UserException;
+
+ /**
+ * Locally creates a new volume and registers the volume at the Directory
+ * Service. The ACL is provided as described in
+ * setVolumeACL(...).
+ *
+ *
+ * @param volumeName
+ * the name for the new volume
+ * @param volumeACL
+ * the ACL for the volume
+ * @param userId
+ * the user id
+ * @param osdPolicyId
+ * the id of the OSD policy to use with this volume
+ * @param stripingPolicyId
+ * the id of the default striping policy used for files stored in
+ * the volume
+ * @param fileAccessPolicyId
+ * the id of the access policy used for files in the volume
+ * @throws UserException
+ * if the volume already exists
+ * @throws BrainException
+ * if an error occured in the storage backend
+ * @see #setVolumeACL(String, Map)
+ */
+ public void createVolume(MRCRequest request, String volumeName,
+ long osdPolicyId, long stripingPolicyId, long fileAccessPolicyId,
+ long uid, Map volumeACL) throws BrainException,
+ UserException;
+
+ /**
+ * Sets an ACL for the volume with the given name. The ACL is provided
+ * as an access control list of the form {user:long=rights:long, user2=...,
+ * ...}.
+ *
+ *
+ * rights: the access rights the user is granted on the volume;
+ * rights & 1 checks for read access,
+ * rights & 2 checks for write access and
+ * rights & 4 checks for execution access.
+ *
+ *
+ * @param volumeName
+ * the name of the volume
+ * @param volumeACL
+ * the ACL
+ * @throws UserException
+ * if the volume is invalid or the local MRC is not responsible
+ * for the volume
+ * @throws BrainException
+ * if an error occured in the storage backend
+ *
+ * @see #setVolumeACL(String, Map)
+ */
+ public void setVolumeACL(String volumeName, Map volumeACL)
+ throws BrainException, UserException;
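+
+ /* Example with illustrative values: an ACL granting user 501 read and write
+ access (rights 1 | 2 = 3) and user 502 read-only access (rights 1) would be
+ passed as the map {501=3, 502=1}. */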
+
+ /**
+ * Returns the ACL of the volume with the given name. The ACL is
+ * provided as described in {@link #setVolumeACL(String, Map)}.
+ *
+ * @param volumeName
+ * the name of the volume
+ * @return the ACL of the volume
+ * @throws UserException
+ * if the volume is invalid or the local MRC is not responsible
+ * for the volume
+ * @throws BrainException
+ * if an error occured in the storage backend
+ *
+ * @see #setVolumeACL(String, Map)
+ */
+ public Map getVolumeACL(String volumeName)
+ throws BrainException, UserException;
+
+ /**
+ * Deletes an existing volume held by the local MRC. All associated
+ * directories and files are removed as well.
+ *
+ * @param name
+ * the name of the volume to remove
+ * @throws UserException
+ * if the volume is invalid or the local MRC is not responsible
+ * for the volume
+ * @throws BrainException
+ * if an error occured in the storage backend
+ */
+ public void deleteVolume(String name) throws BrainException, UserException;
+
+ /**
+ * Creates a new file without user attributes and striping policy. This
+ * method is equivalent to createFile(path, null, userId).
+ *
+ * @param path
+ * the path to the file
+ * @param userId
+ * the id of the user on behalf of whom the file is created
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occured in the storage backend
+ * @see #createFile(String, Map, long, long)
+ */
+ public void createFile(String path, long userId) throws BrainException,
+ UserException;
+
+ /**
+ * Creates a new file.
+ *
+ * @param path
+ * the path to the file
+ * @param attrs
+ * a map containing the file attributes as (key/value) pairs
+ * @param stripingPolicyId
+ * the id of the striping policy used with this file. If
+ * 0 is specified, the volume striping policy will
+ * be used.
+ * @param userId
+ * the id of the user on behalf of whom the file is created
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occured in the storage backend
+ */
+ public void createFile(String path, Map attrs,
+ long stripingPolicyId, long userId, Map acl)
+ throws BrainException, UserException;
+
+ /**
+ * Adds a user attribute to an existing file. If the attribute already
+ * exists for the given user, it will be overwritten.
+ *
+ * @param path
+ * the path to the file
+ * @param key
+ * the attribute key
+ * @param value
+ * the attribute value
+ * @param userId
+ * the user id associated with the attribute. If 0
+ * is provided, the attribute will be regarded as global, i.e. it
+ * will be visible to any user.
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occured in the storage backend
+ */
+ public void addUserAttribute(String path, String key, String value,
+ long userId) throws BrainException, UserException;
+
+ /**
+ * Adds multiple user attributes to an existing file. If the attribute
+ * already exists for the given user, it will be overwritten.
+ *
+ * @param path
+ * the path to the file
+ * @param attrs
+ * a map containing the file attributes as (key/value) pairs
+ * @param userId
+ * the user id associated with the attributes. If 0
+ * is provided, the attributes will be regarded as global, i.e.
+ * they will be visible to any user.
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occured in the storage backend
+ */
+ public void addUserAttributes(String path, Map attrs,
+ long userId) throws BrainException, UserException;
+
+ /**
+ * Assigns a new replica to an existing file. Each replica of a file
+ * represents the entire file content. Since different replicas may be
+ * striped over multiple OSDs in different ways, each replica is described
+ * by a string containing striping information. The striping information
+ * string will only be stored but not evaluated by the MRC.
+ *
+ * @param globalFileId
+ * the global ID of the file in the form of "volumeId":"fileId"
+ * @param stripingInfo
+ * an opaque string containing striping information about the
+ * replica
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occured in the storage backend
+ */
+ public void addReplica(String globalFileId, String stripingInfo)
+ throws BrainException, UserException;
+
+ /**
+ * Removes a user attribute from an existing file.
+ *
+ * @param path
+ * the path to the file
+ * @param key
+ * the key of the attribute
+ * @param userId
+ * the id of the user who defined the attribute, or
+ * 0 for a global attribute
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public void removeUserAttribute(String path, String key, long userId)
+ throws BrainException, UserException;
+
+ /**
+ * Removes multiple user attributes from an existing file.
+ *
+ * @param path
+ * the path to the file
+ * @param attrKeys
+ * a list containing all keys of the attribute
+ * @param userId
+ * the id of the user who defined the attributes, or
+ * 0 for a global attribute
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public void removeUserAttributes(String path, List attrKeys,
+ long userId) throws BrainException, UserException;
+
+ /**
+ * Returns a map containing all user-defined attribute/value pairs of a
+ * file. In case of a directory, null will be returned.
+ *
+ * @param path
+ * the path to the file
+ * @param userId
+ * the user id associated with the attributes
+ * @return a map containing the attributes
+ *
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public Map getUserAttributes(String path, long userId)
+ throws BrainException, UserException;
+
+ /**
+ * Deletes a file or directory including all user attributes. In case of a
+ * directory, the directory is required to be empty, i.e. it must neither
+ * contain files nor subdirectories.
+ *
+ * @param path
+ * the path to the file
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public void delete(String path) throws BrainException, UserException;
+
+ /**
+ * Creates a new directory without user attributes. The operation will fail
+ * unless the first n-1 of n components in path refer to an
+ * existing directory. This method is equivalent to
+ * createDir(path, null, userId).
+ *
+ * @param path
+ * complete path including the volume name
+ * @param userId
+ * the id of the user on behalf of whom the directory is created
+ * @throws UserException
+ * if the parent path does not exist or the local MRC is not
+ * responsible for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public void createDir(String path, long userId) throws BrainException,
+ UserException;
+
+ /**
+ * Creates a new directory with user attributes. The operation will fail
+ * unless the first n-1 of n components in path refer to an
+ * existing directory.
+ *
+ * @param path
+ * complete path including the volume name
+ * @param attrs
+ * a map containing the directory attributes as (key/value) pairs
+ * @param userId
+ * the id of the user on behalf of whom the directory is created
+ * @throws UserException
+ * if the parent path does not exist or the local MRC is not
+ * responsible for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public void createDir(String path, Map attrs, long userId)
+ throws BrainException, UserException;
+
+ /**
+ * Lists the contents of a directory. Note that no guarantees are given
+ * about the order in which elements are listed.
+ *
+ * @param path
+ * the complete path including the volume
+ * @return a list containing the names of the subdirectories and files in the directory
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public List readDir(String path) throws BrainException,
+ UserException;
+
+ /**
+ * Returns the result of a 'readdir' combined with a 'stat' for each
+ * directory entry. It is returned in the form of a map which maps the entry
+ * names to maps containing the stat infos as provided by the 'stat' method.
+ *
+ * @param path
+ * the directory of which the contents are returned
+ * @param userId
+ * the id of the user on behalf of whom the stat is returned.
+ * This is necessary in order to properly translate the POSIX
+ * access rights.
+ * @return a map from entry names to the stat info of the directory contents
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public Map> readDirAndStat(String path,
+ long userId) throws BrainException, UserException;
+
+ /**
+ * Returns information about a single file or directory. The data returned
+ * has the following shape:
+ *
+ * {volId=long, sliceId=long, fileId=long, type=int, userId=long, size=long,
+ * atime=long, mtime=long, ctime=long, posixAccessMode=int}.
+ *
+ *
+ * volId: the id of the volume holding the file or
+ * directory
+ * sliceId: the id of the slice holding the file or
+ * directory
+ * fileId: the id of the file or directory
+ * type: an integer between 0 and 2 describing the type
+ * (0=directory, 1=file, 2=symlink)
+ * userId: the user id of the file owner
+ * size: the file size
+ * atime: the access timestamp
+ * mtime: the modification timestamp
+ * ctime: the change timestamp
+ * posixAccessMode: the posix access rights (rwx) for
+ * the owner, the VO and the rest
+ *
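+ * An illustrative (made-up) example for a regular file:
+ * {volId=1, sliceId=1, fileId=42, type=1, userId=1000, size=4096,
+ * atime=1210000000, mtime=1210000000, ctime=1210000000, posixAccessMode=493}
+ * (493 is the decimal value of octal 0755).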
+ *
+ * @param path
+ * the path of the file in the file system
+ * @param userId
+ * the id of the user on behalf of whom the stat is returned.
+ * This is necessary in order to properly translate the POSIX
+ * access rights.
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ * @return the stats for the file
+ */
+ public Map stat(String path, long userId)
+ throws BrainException, UserException;
+
+ /**
+ * Creates a symbolic link to the given target. The link itself
+ * behaves like an independent file with its own metadata. When file
+ * contents are read, however, the read request will be redirected to the
+ * given target path. No guarantees are given that the target path is valid,
+ * nor will the softlink be updated when the referenced file is moved or
+ * renamed.
+ *
+ * @param linkPath
+ * the path for the link itself
+ * @param targetPath
+ * the path to the link's target
+ * @param userId
+ * the id of the user on behalf of whom the file is created
+ * @throws UserException
+ * if the link path is invalid or the local MRC is not
+ * responsible for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public void createSymLink(String linkPath, String targetPath, long userId)
+ throws BrainException, UserException;
+
+ /**
+ * Returns the path to which the symbolic link referenced by the given path
+ * points.
+ *
+ * @param path
+ * the path to the symbolic link
+ * @return the path which the symbolic link points to
+ * @throws UserException
+ * if the path does not point to a symbolic link or the local
+ * MRC is not responsible for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public String getSymlinkTarget(String path) throws BrainException,
+ UserException;
+
+ /**
+ * Opens an existing file.
+ *
+ *
+ * If the capability is successfully issued, a map of the following form
+ * will be returned:
+ *
+ * result = {storageLocs:StorageLocList, stripingPolicy:long,
+ * capability:string}. storageLocs = [feasibleHost_1:string, ... ,
+ * feasibleHost_n:string]
+ *
+ *
+ * storageLocs: a list of strings 'hostname:port'
+ * describing the locations of feasible OSDs
+ * stripingPolicy: the id of the striping policy used
+ * with the given path
+ * capability: the string containing the encrypted
+ * capability
+ *
+ *
+ *
+ * In case the capability could not be issued, null is
+ * returned.
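+ *
+ * An illustrative (made-up) example result for a file striped across two
+ * OSDs: {storageLocs=["osd1.example.org:32640", "osd2.example.org:32640"],
+ * stripingPolicy=1, capability="..."}.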
+ *
+ * @param path
+ * the path for which to generate the capability
+ * @param accessMode
+ * the access for the file/directory. Possible attributes are
+ * "rwx".
+ * @param userId
+ * the id of the user on behalf of whom the capability is issued
+ * @return a map of the form described above
+ *
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public Map open(String path, String accessMode, long userId)
+ throws BrainException, UserException;
+
+ /**
+ * Checks whether the given path refers to a directory.
+ *
+ * @param path
+ * the path
+ * @return true if the path refers to a directory,
+ * false otherwise
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public boolean isDir(String path) throws BrainException, UserException;
+
+ /**
+ * Checks whether the given path refers to a symbolic link.
+ *
+ * @param path
+ * the path
+ * @return true if the path refers to a symbolic link,
+ * false otherwise
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public boolean isSymLink(String path) throws BrainException, UserException;
+
+ /**
+ * Moves the file or directory referenced by the source path to the given
+ * target path.
+ *
+ * The behavior of this method depends on what both paths are pointing to.
+ * The source path must point to a valid file or directory which is managed
+ * by the local MRC.
+ *
+ * The behavior is as follows:
+ *
+ * source points to a file:
+ *
+ *
+ * target is a file or does not exist: the source file will be moved to
+ * the target's parent directory where the old file (if it exists) is removed
+ * target is a directory: the source file will be moved to the target
+ * directory
+ *
+ * source points to a directory:
+ *
+ *
+ * target is a file: an exception is thrown
+ * target is a directory: the source directory tree will be moved to
+ * the target directory
+ * target does not exist: the source directory will be moved to the
+ * target's parent directory and renamed
+ *
+ *
+ *
+ *
+ * @param sourcePath
+ * the path pointing to the source file or directory
+ * @param targetPath
+ * the path pointing to the target file or directory
+ * @throws UserException
+ * if the source or target path is invalid or the local MRC is
+ * not responsible for the source path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public void move(String sourcePath, String targetPath)
+ throws BrainException, UserException;
+
+ /**
+ * Submits a query. A list of files matching the given query string is
+ * returned in the form of path names.
+ *
+ * @param path
+ * the path from which the query is executed. Query results will
+ * be restricted to paths that are contained by the given path.
+ * @param queryString
+ * a string representing the query
+ * @param userId
+ * the id of the user on behalf of whom the query is executed
+ * @return a list of path names of the files matching the query
+ * @throws UserException
+ * if the query is invalid
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public List query(String path, String queryString, long userId)
+ throws BrainException, UserException;
+
+ /**
+ * Sets the size of a file.
+ *
+ * @param path
+ * the path of the file
+ * @param fileSize
+ * the new size of the file
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public void updateFileSize(String path, long fileSize)
+ throws BrainException, UserException;
+
+ /**
+ * Terminates the Brain instance. All connections to remote hosts will be
+ * closed, and unconfirmed writes will be flushed to disk.
+ *
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public void shutdown() throws BrainException;
+
+ /**
+ * Returns the list containing striping information about replicas for the
+ * file with the given global ID.
+ *
+ * @param globalFileId
+ * the global ID of the file in the form of "volumeId":"fileId"
+ * @return a list of strings containing striping information
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ */
+ public List getReplicas(String globalFileId) throws BrainException,
+ UserException;
+
+ /**
+ * Locally creates a file tree from the given tree data. The tree is
+ * inserted at the given target path. The purpose of this method is to
+ * allow remote MRC instances to transfer directory trees to the local file
+ * system, e.g. in connection with a 'move' operation.
+ *
+ * The tree data is provided as follows:
+ *
+ * treeData:TreeData = [fileData:FileData, attrs:AttributeList,
+ * osdData:OSDEndpointList, stripingPolicyId: long, ref:string,
+ * subElements:TreeData] FileData = {name:string, atime:long,
+ * ctime:long, mtime:long, size:long, userId:long, isDirectory:boolean}
+ * AttributeList = [{key:string, value:string, type:long,
+ * userId:long}, {...}] OSDEndpointList = [endpoint1:string,
+ * ...]
+ *
+ * @param treeData
+ * the data representing the subtree to add
+ * @param targetPath
+ * the path where to add the subtree
+ * @throws BrainException
+ * if an error occurred in the storage backend
+ * @throws UserException
+ * if the path is invalid or the local MRC is not responsible
+ * for the path
+ */
+ public void createFileTree(List treeData, String targetPath)
+ throws BrainException, UserException;
+
+ // -- MONITORING ROUTINES
+
+ public Map getPerVolumeOSDs();
+
+ /**
+ * Returns a map of volumes held by the local MRC. The result is returned
+ * in the form of a mapping from volume ids to volume names.
+ *
+ * @return a map volumeId -> volumeName of all volumes on the local server
+ */
+ public Map getLocalVolumes() throws Exception;
+
+}
diff --git a/servers/src/org/xtreemfs/common/clients/mrc/XtreemFile.java b/servers/src/org/xtreemfs/common/clients/mrc/XtreemFile.java
new file mode 100644
index 0000000000000000000000000000000000000000..47084836d777a9a172b966025bd189042b200273
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/mrc/XtreemFile.java
@@ -0,0 +1,441 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common.clients.mrc;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.xtreemfs.common.auth.NullAuthProvider;
+import org.xtreemfs.common.clients.RPCResponse;
+import org.xtreemfs.common.logging.Logging;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.mrc.utils.MessageUtils;
+
+/**
+ *
+ * @author bjko
+ */
+public class XtreemFile {
+
+ public static String pathSeparator = "/";
+
+ public static char pathSeparatorChar = '/';
+
+ private final String volumeName;
+
+ private final String filename;
+
+ private Map statInfo;
+
+ private MRCClient client;
+
+ private InetSocketAddress mrc;
+
+ private final boolean isVolumeList;
+
+ private final boolean invalidVolume;
+
+ private final boolean isSysDir;
+
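+ // cache of volume name -> MRC mappings (VolCacheEntry), shared by all
+ // XtreemFile instances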
+ private static final HashMap vcache = new HashMap();
+
+ /** Creates a new instance of XtreemFile */
+ public XtreemFile(MRCClient client, InetSocketAddress dirService, String filename)
+ throws IOException {
+ System.out.println("created new file for: " + filename);
+ this.client = client;
+ // first extract the volume name
+ String woPrefix = filename;
+ if (filename.startsWith("/xtreemfs")) {
+ woPrefix = filename.substring("/xtreemfs".length());
+ }
+ if (woPrefix.length() == 0)
+ woPrefix = "/";
+ int posSecondslash = woPrefix.substring(1).indexOf(pathSeparatorChar);
+ if (posSecondslash == -1) {
+ volumeName = woPrefix.substring(1);
+ this.filename = "/";
+ } else {
+ volumeName = woPrefix.substring(1, posSecondslash + 1);
+ this.filename = woPrefix.substring(posSecondslash + 1);
+ }
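+ // e.g. "/xtreemfs/myVolume/dir/file.txt" yields volumeName "myVolume" and
+ // filename "/dir/file.txt"; "/xtreemfs/myVolume" alone yields filename "/"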
+ System.out.println("XtreemFile: voumeName=" + volumeName + " filename=" + this.filename);
+
+ if (volumeName.length() > 0) {
+ // ask the dir service for the MRC holding the volume
+
+ // check, if it is a system dir
+ if (woPrefix.endsWith("/.") || woPrefix.endsWith("/..")) {
+ isSysDir = true;
+ statInfo = null;
+ isVolumeList = false;
+ invalidVolume = false;
+ } else {
+ // check my cache
+ VolCacheEntry vci = vcache.get(this.volumeName);
+ if (vci != null) {
+ // discard cache entries that are older than one minute
+ if (vci.created < System.currentTimeMillis() - 1000 * 60) {
+ vci = null;
+ }
+ }
+ if (vci == null) {
+ ArrayList params = new ArrayList();
+ params.add(volumeName);
+ Object o = null;
+ // try {
+ // RPCResponse resp =
+ // client.sendGenericRequest(dirService,"getVolumeInfo",params);
+ // o = resp.get();
+ // } catch (JSONException ex) {
+ // throw new IOException("cannot encode/decode message",ex);
+ // }
+ // FIXME: adapt to new Directory Service
+ System.out.println("VVOLINFO is " + o);
+ if (o == null) {
+ invalidVolume = true;
+ } else {
+ Map volInfo = (Map) o;
+ Map mrcMap = (Map) volInfo.get("mrcMap");
+ mrc = MessageUtils.addrFromString((String) mrcMap.keySet().toArray()[0]);
+ vci = new VolCacheEntry();
+ vci.created = System.currentTimeMillis();
+ vci.volName = this.volumeName;
+ vci.mrc = mrc;
+ vcache.put(this.volumeName, vci);
+ invalidVolume = false;
+ }
+ } else {
+ mrc = vci.mrc;
+ invalidVolume = false;
+ }
+ if (!invalidVolume) {
+ try {
+
+ // now we have an MRC; let's fetch the file details
+ statInfo = client.stat(mrc, this.volumeName + this.filename, true, true,
+ true, NullAuthProvider.createAuthString("1", "1"));
+ System.out.println("STAT INFO:" + statInfo);
+ } catch (Exception ex) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this, ex);
+ statInfo = null;
+ } /*
+ * catch (IOException ex) { ex.printStackTrace();
+ * statInfo = null; }
+ */
+ }
+ isVolumeList = false;
+ isSysDir = false;
+ }
+ } else {
+ isSysDir = false;
+ isVolumeList = true;
+ invalidVolume = false;
+ mrc = dirService;
+ }
+ }
+
+ public Map getStatInfo() {
+ return statInfo;
+ }
+
+ public boolean isDirectory() {
+ if (isSysDir)
+ return true;
+ if (isVolumeList)
+ return true;
+ if (statInfo == null) {
+ System.out.println("no stat info");
+ return false;
+ }
+ Long oType = (Long) statInfo.get("objType");
+ System.out.println("isDir= " + (oType == 2) + " type=" + oType);
+ return (oType == 2);
+ }
+
+ public boolean isFile() {
+ if (isSysDir)
+ return false;
+ if (isVolumeList)
+ return false;
+ if (statInfo == null) {
+ System.out.println("no stat info");
+ return false;
+ }
+ Long oType = (Long) statInfo.get("objType");
+ System.out.println("isFile= " + (oType == 1) + " type=" + oType);
+ return (oType == 1);
+ }
+
+ public boolean delete() {
+ if (isSysDir)
+ return false;
+ if (isVolumeList)
+ return false;
+ try {
+ client.delete(mrc, this.volumeName + this.filename, NullAuthProvider.createAuthString(
+ "1", "1"));
+ return true;
+ } catch (Exception ex) {
+ return false;
+ }
+ }
+
+ public long length() {
+ if (isSysDir)
+ return 0l;
+ if (isVolumeList)
+ return 0l;
+ if (statInfo == null)
+ return 0l;
+ return (Long) statInfo.get("size");
+ }
+
+ public String toString() {
+ return this.volumeName + this.filename;
+ }
+
+ public boolean exists() {
+ if (isSysDir)
+ return true;
+ if (isVolumeList && invalidVolume)
+ return false;
+ if (isVolumeList)
+ return true;
+ return (statInfo != null);
+ }
+
+ public boolean renameTo(XtreemFile dest) {
+ if (isSysDir)
+ return false;
+ if (isVolumeList)
+ return false;
+ if (statInfo == null)
+ return false;
+
+ try {
+ client.move(mrc, this.volumeName + this.filename, dest.volumeName + dest.filename,
+ NullAuthProvider.createAuthString("1", "1"));
+ return true;
+ } catch (Exception ex) {
+ return false;
+ }
+ }
+
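+ // The permission checks below test the POSIX owner bits of the mode value
+ // returned by the MRC: 256 = owner read (octal 0400), 128 = owner write
+ // (octal 0200), 64 = owner execute (octal 0100).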
+ public boolean canExecute() {
+ if (isSysDir)
+ return false;
+ if (isVolumeList)
+ return false;
+ if (statInfo == null)
+ return false;
+ Long posixAccessMode = (Long) statInfo.get("posixAccessMode");
+ return (posixAccessMode.intValue() & 64) > 0;
+ }
+
+ public boolean canRead() {
+ if (isSysDir)
+ return true;
+ if (isVolumeList)
+ return true;
+ if (statInfo == null)
+ return false;
+ Long posixAccessMode = (Long) statInfo.get("posixAccessMode");
+ return (posixAccessMode.intValue() & 256) > 0;
+ }
+
+ public boolean canWrite() {
+ if (isSysDir)
+ return false;
+ if (isVolumeList)
+ return false;
+ if (statInfo == null)
+ return false;
+ Long posixAccessMode = (Long) statInfo.get("posixAccessMode");
+ return (posixAccessMode.intValue() & 128) > 0;
+ }
+
+ public long lastModified() {
+ if (isSysDir)
+ return System.currentTimeMillis();
+ if (isVolumeList)
+ return 0l;
+ if (statInfo == null)
+ return 0l;
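+ // mtime is stored in seconds; convert it to milliseconds here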
+ Long ll = (Long) statInfo.get("mtime") * 1000;
+ return ll;
+ }
+
+ public String[] list() {
+ if (isSysDir)
+ return null;
+ if (isVolumeList) {
+ // list volumes...
+ Object o = null;
+ try {
+ RPCResponse resp = client.sendRPC(mrc, "getVolumeInfos", new ArrayList(),
+ NullAuthProvider.createAuthString("1", "1"), null);
+ o = resp.get();
+ } catch (Exception ex) {
+ System.out.println("cannot get volumes: " + ex);
+ return null;
+ }
+ List vols = (List) o;
+ List volNames = new LinkedList();
+ for (Object vol : vols) {
+ Map mrcMap = (Map) vol;
+ volNames.add((String) mrcMap.get("name"));
+ }
+ return volNames.toArray(new String[0]);
+ } else {
+ if (isDirectory() == false)
+ return null;
+ try {
+
+ List entries = client.readDir(mrc, this.volumeName + this.filename,
+ NullAuthProvider.createAuthString("1", "1"));
+ return entries.toArray(new String[0]);
+ } catch (Exception ex) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this, ex);
+ return null;
+ }
+ }
+ }
+
+ public MiniStatInfo[] listAndStat() {
+ if (isSysDir)
+ return null;
+ if (isVolumeList) {
+ // list volumes...
+ Object o = null;
+ try {
+ RPCResponse resp = client.sendRPC(mrc, "getVolumeInfos", new ArrayList(),
+ NullAuthProvider.createAuthString("1", "1"), null);
+ o = resp.get();
+ } catch (Exception ex) {
+ System.out.println("cannot get volumes: " + ex);
+ return null;
+ }
+ List vols = (List) o;
+ List volNames = new LinkedList();
+ for (Object vol : vols) {
+ Map mrcMap = (Map) vol;
+ MiniStatInfo mi = new MiniStatInfo();
+ mi.type = "vol";
+ mi.name = (String) mrcMap.get("name");
+ volNames.add(mi);
+ }
+ return volNames.toArray(new MiniStatInfo[0]);
+ } else {
+ if (isDirectory() == false)
+ return null;
+ try {
+
+ Map> entries = client.readDirAndStat(mrc,
+ this.volumeName + this.filename, NullAuthProvider.createAuthString("1", "1"));
+ List dir = new LinkedList();
+ for (String entry : entries.keySet()) {
+ MiniStatInfo mi = new MiniStatInfo();
+ Long otype = (Long) entries.get(entry).get("objType");
+ if (otype == 1)
+ mi.type = "file";
+ else
+ mi.type = "dir";
+ mi.name = entry;
+ dir.add(mi);
+ }
+ return dir.toArray(new MiniStatInfo[0]);
+ } catch (Exception ex) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this, ex);
+ return null;
+ }
+ }
+ }
+
+ public boolean mkdir() {
+ if (isSysDir)
+ return false;
+ if (isVolumeList)
+ return false;
+ try {
+ client.createDir(mrc, this.volumeName + this.filename, NullAuthProvider
+ .createAuthString("1", "1"));
+ return true;
+ } catch (Exception ex) {
+ return false;
+ }
+ }
+
+ public boolean touch() {
+ if (isSysDir)
+ return false;
+ if (isVolumeList)
+ return false;
+ try {
+ client.createFile(mrc, this.volumeName + this.filename, NullAuthProvider
+ .createAuthString("1", "1"));
+ return true;
+ } catch (Exception ex) {
+ return false;
+ }
+ }
+
+ public byte[] read(long start, long numBytes) throws IOException {
+ try {
+ // FIXME:not finished yet
+ Map capability = client.open(mrc, this.volumeName + this.filename, "r",
+ NullAuthProvider.createAuthString("1", "1"));
+ } catch (Exception ex) {
+ throw new IOException(ex);
+ }
+
+ // OSDClient oc = new OSDClient();
+
+ return new byte[0];
+ }
+
+ public boolean write(long start, long numBytes) {
+ return true;
+ }
+
+ public static class VolCacheEntry {
+ public long created;
+
+ public String volName;
+
+ public InetSocketAddress mrc;
+ }
+
+ public static class MiniStatInfo {
+ public String type;
+
+ public String name;
+ }
+
+}
diff --git a/servers/src/org/xtreemfs/common/clients/osd/ConcurrentFileMap.java b/servers/src/org/xtreemfs/common/clients/osd/ConcurrentFileMap.java
new file mode 100644
index 0000000000000000000000000000000000000000..2239e3e1e02ed4068075c7841c620cc717779d26
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/osd/ConcurrentFileMap.java
@@ -0,0 +1,466 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin,
+ Barcelona Supercomputing Center - Centro Nacional de Supercomputacion
+ and Consiglio Nazionale delle Ricerche.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHOR: Felix Langner (ZIB)
+ */
+package org.xtreemfs.common.clients.osd;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * This class models a thread-safe file map for listing all available files of an OSD for
+ * service purposes such as cleaning up deleted files.
+ *
+ * The first key is the volume as a {@link List} of volumeID, mrcAddress and mrcPort, the second
+ * key is the fileID, and the third level holds file attributes such as fileSize and preview.
+ *
+ * @author langner
+ */
+public final class ConcurrentFileMap {
+ private static final long serialVersionUID = -7736474666790682726L;
+
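+ // Conceptual layout with illustrative values:
+ // {[volumeID, mrcHost, mrcPort] -> {"volumeID:fileNumber" ->
+ // {"size"="1024", "objectSize"="131072", "preview"="..."}}}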
+ Map>> map = new ConcurrentHashMap>>();
+
+ /**
+ * Default constructor.
+ */
+ public ConcurrentFileMap() {
+ super();
+ }
+
+ /**
+ * Parses a JSON response into a {@link ConcurrentFileMap}.
+ * @param map
+ */
+ public ConcurrentFileMap(Map>> map) {
+ super();
+ for (String key : map.keySet()){
+ this.map.put(Volume.parse(key) , map.get(key));
+ }
+ }
+
+ /**
+ * Inserts a volumeID,fileID pair given by a directory's name into the fileMap
+ * and adds the attributes size, objectSize and preview to the entry.
+ *
+ * @param directory hex name (volumeID:fileID)
+ * @param size
+ * @param preview
+ */
+ public synchronized void insert(String directory, long size, String preview, long maxObjectSize) throws IOException{
+ String[] fileDesc = directory.split(":");
+ if (fileDesc.length==2){
+ try{
+ Integer.parseInt(fileDesc[1]);
+
+ Volume newVol = new Volume (fileDesc[0]);
+
+ if (fileDesc.length!=2) throw new IOException("Directory: '"+directory+"' has an illegal format!");
+
+ if(containsKey(newVol))
+ addFile(directory,size,maxObjectSize,preview,(Map>) get(newVol));
+ else
+ map.put(newVol, newVolFile(directory,size,maxObjectSize,preview));
+ }catch(NumberFormatException ne){
+ // ignore
+ }
+ }
+ }
+
+ /**
+ *
+ * @return set of unresolved volumeIDs.
+ */
+ public Set unresolvedVolumeIDSet() {
+ Set result = new HashSet();
+
+ Set keys = map.keySet();
+ for(Volume key : keys){
+ if (key.size()==1) result.add(key.get(0));
+ }
+ return result;
+ }
+
+ /**
+ * @return set of resolved volumeIDs.
+ */
+ public Set resolvedVolumeIDSet() {
+ Set result = new HashSet();
+
+ Set keys = map.keySet();
+ for(Volume key : keys){
+ if (key.size()>1 || key.equals(Volume.NOT_AVAILABLE)) result.add(key.get(0));
+ }
+ return result;
+ }
+
+ /**
+ * @return set of resolved volumeIDs, excluding the 'not available' volume.
+ */
+ public Set volumeIDSetForRequest() {
+ Set result = new HashSet();
+
+ Set keys = map.keySet();
+ for(Volume key : keys){
+ if (key.size()>1 && !key.equals(Volume.NOT_AVAILABLE)) result.add(key.get(0));
+ }
+ return result;
+ }
+
+ /**
+ * Replaces the entry with the given volumeID by a new entry that also carries
+ * the MRC address.
+ *
+ * If address is null, the volume is marked as 'unknown' (not available).
+ *
+ * @param volumeID
+ * @param address
+ */
+ public synchronized void saveAddress(String volumeID, InetSocketAddress address) {
+ if (address!=null)
+ map.put(new Volume(volumeID,address.getHostName(),((Integer) address.getPort()).toString()), remove(new Volume(volumeID)));
+ else{
+ if (containsKey(Volume.NOT_AVAILABLE))
+ ((Map>) get(Volume.NOT_AVAILABLE)).putAll((Map>) remove(new Volume(volumeID)));
+ else
+ map.put(Volume.NOT_AVAILABLE, remove(new Volume(volumeID)));
+ }
+ }
+
+ /**
+ *
+ * @param volumeID
+ * @return the address for the given volumeID, or null if not available.
+ */
+ public InetSocketAddress getAddress(String volumeID) {
+ Volume predicate = new Volume(volumeID);
+
+ Set keys = map.keySet();
+ for (Volume key : keys){
+ if (predicate.equals(Volume.NOT_AVAILABLE))
+ return null;
+ else if (key.equals(predicate))
+ return new InetSocketAddress(key.get(1),Integer.parseInt(key.get(2)));
+ }
+
+ return null;
+ }
+
+ /**
+ *
+ * @param volume
+ * @return a {@link Set} of fileIDs for the given volume.
+ */
+ public Set getFileNumberSet(List volume) {
+ Set result = new HashSet();
+ for (String fID : getFileIDSet(volume)){
+ result.add(fID.substring(fID.indexOf(":")+1, fID.length()));
+ }
+ return result;
+ }
+
+ /**
+ *
+ * @param volume
+ * @return a {@link Set} of fileIDs for the given volumeID.
+ */
+ public Set getFileNumberSet(String volumeID) {
+ Set result = new HashSet();
+ for (String fID : getFileIDSet(volumeID)){
+ result.add(fID.substring(fID.indexOf(":")+1, fID.length()));
+ }
+ return result;
+ }
+
+ /**
+ *
+ * @param volumeID
+ * @return a {@link List} of fileNumbers for the given volumeID.
+ */
+ public List getFileNumbers(String volumeID) {
+ List result = new LinkedList();
+ for (String fID : getFileIDs(volumeID)){
+ result.add(fID.substring(fID.indexOf(":")+1, fID.length()));
+ }
+ return result;
+ }
+
+ /**
+ *
+ * @param volume
+ * @return a {@link Set} of fileIDs for the given volume.
+ */
+ public Set getFileIDSet(List volume) {
+ return ((Map>) get(volume)).keySet();
+ }
+
+ /**
+ *
+ * @param volumeID
+ * @return a {@link Set} of fileIDs for the given volumeID.
+ */
+ public Set getFileIDSet(String volumeID) {
+ return ((Map>) get(volumeID)).keySet();
+ }
+
+ /**
+ *
+ * @return the fileMap in a JSON-compatible representation.
+ */
+ public Map>> getJSONCompatible (){
+ Map>> result = new ConcurrentHashMap>>();
+ for (Volume key : map.keySet()){
+ result.put(key.toString(), get(key));
+ }
+ return result;
+ }
+
+ /**
+ * Removes a file given by volumeID and fileID from the fileMap.
+ * @param volumeID
+ * @param fileID
+ */
+ public void remove(String volumeID, String fileID) {
+ ((Map>) get(new Volume(volumeID))).remove(fileID);
+
+ }
+
+ /**
+ *
+ * @return the number of fileIDs in the fileMap.
+ */
+ public synchronized int size(){
+ int result = 0;
+
+ for (Volume key : ((Set) map.keySet())){
+ result += ((Map>) get(key)).size();
+ }
+
+ return result;
+ }
+
+/*
+ * getter
+ */
+ public Long getFileSize(String volumeID, String file) {
+ return Long.valueOf(get(volumeID).get(file).get("size"));
+ }
+
+ public Long getFileSize(List volume, String file) {
+ return Long.valueOf(get(volume).get(file).get("size"));
+ }
+
+ public String getFilePreview(List volume, String file) {
+ return get(volume).get(file).get("preview");
+ }
+
+ public Long getObjectSize(List volume, String file) {
+ return Long.valueOf(get(volume).get(file).get("objectSize"));
+ }
+
+/*
+ * override
+ */
+
+ public Set> keySetList() {
+ Set> result = new HashSet>();
+ for (Volume v: map.keySet()){
+ result.add(v);
+ }
+
+ return result;
+ }
+
+ public boolean containsKey(Object key) {
+ for (Volume thisKey : map.keySet()){
+ if(thisKey.equals(key)){
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public Map> remove(Object key) {
+ Volume rq = null;
+ for (Volume thisKey : map.keySet()){
+ if(thisKey.get(0).equals(key) || thisKey.equals(key)){
+ rq = thisKey;
+ break;
+ }
+ }
+ return map.remove(rq);
+ }
+
+ public Map> get(Object key) {
+ for (Volume thisKey : map.keySet()){
+ if(thisKey.get(0).equals(key) || thisKey.equals(key)){
+ key = thisKey;
+ break;
+ }
+ }
+ return map.get(key);
+ }
+
+ /**
+ *
+ * @return true if no fileIDs are saved in the map, false otherwise.
+ */
+ public boolean isEmpty() {
+ if (!map.isEmpty()){
+ boolean isEmpty = true;
+ for (Map> value : map.values())
+ isEmpty &= value.isEmpty();
+ return isEmpty;
+ }
+ return true;
+ }
+/*
+ * private methods
+ */
+
+ /**
+ *
+ * @param volumeID
+ * @return a {@link List} of fileIDs for the given volumeID.
+ */
+ private List getFileIDs(String volumeID) {
+ List result = new LinkedList();
+ for (String fID : getFileIDSet(volumeID))
+ result.add(fID);
+
+ return result;
+ }
+
+ /**
+ *
+ * @param size
+ * @param objectSize
+ * @param preview
+ * @return a new Map with the given file details in it.
+ */
+ private Map fileDetails (Long size,Long objectSize,String preview){
+ ConcurrentHashMap details = new ConcurrentHashMap();
+ details.put("size", size.toString());
+ details.put("objectSize", objectSize.toString());
+ details.put("preview", preview);
+
+ return details;
+ }
+
+ /**
+ *
+ * @param fileID
+ * @param size
+ * @param preview
+ * @param objectSize
+ * @return a new map mapping the fileID to the given details.
+ */
+ private Map> newVolFile (String fileID,long size,long objectSize,String preview){
+ Map> volFile = new ConcurrentHashMap>();
+
+ volFile.put(fileID, fileDetails(size,objectSize,preview));
+
+ return volFile;
+ }
+
+ /**
+ * Put the fileID and the file details into the given map.
+ *
+ * @param fileID
+ * @param size
+ * @param preview
+ * @param objectSize
+ * @param map
+ */
+ private void addFile(String fileID,long size,long objectSize,String preview, Map> map){
+ map.put(fileID, fileDetails (size,objectSize,preview));
+ }
+}
+
+ /**
+ * Volume is a {@link List} of volumeID, mrcAddress and mrcPort.
+ * It will just be compared by the first value in the List (the volumeID).
+ *
+ * @author langner
+ *
+ */
+ class Volume extends LinkedList implements List{
+ private static final long serialVersionUID = 7408578018651016089L;
+
+ public Volume(String volID) {
+ super();
+ add(volID);
+ }
+
+ public Volume(String volID, String mrcAddress, String mrcPort) {
+ super();
+ add(volID);
+ add(mrcAddress);
+ add(mrcPort);
+ }
+
+ private Volume(){
+ super();
+ add("unknown");
+ add("unknown");
+ add("unknown");
+ }
+
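+ // Note: hashCode() is not overridden to match this equals(); the surrounding
+ // ConcurrentFileMap therefore looks keys up by scanning its key set rather
+ // than by hash-based lookup.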
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof String){
+ return get(0).equals(o);
+ }else if (o instanceof Volume){
+ return get(0).equals(((Volume) o).get(0));
+ }
+ return false;
+ }
+
+ public static Volume NOT_AVAILABLE = new Volume();
+
+ public static Volume parse(String key) {
+ Volume result = null;
+ String[] values = key.split(",");
+ for (int i = 0; i < values.length; i++) {
+ if (result == null) result = new Volume(values[i]);
+ else result.add(values[i]);
+ }
+ return result;
+ }
+ }
diff --git a/servers/src/org/xtreemfs/common/clients/osd/OSDClient.java b/servers/src/org/xtreemfs/common/clients/osd/OSDClient.java
new file mode 100644
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/osd/OSDClient.java
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Björn Kolbeck (ZIB), Jesús Malo (BSC), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.clients.osd;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.xtreemfs.common.Capability;
+import org.xtreemfs.common.ClientLease;
+import org.xtreemfs.common.buffer.ReusableBuffer;
+import org.xtreemfs.common.clients.RPCClient;
+import org.xtreemfs.common.clients.RPCResponse;
+import org.xtreemfs.common.striping.Locations;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.pinky.HTTPHeaders;
+import org.xtreemfs.foundation.pinky.HTTPUtils;
+import org.xtreemfs.foundation.pinky.SSLOptions;
+import org.xtreemfs.foundation.speedy.MultiSpeedy;
+import org.xtreemfs.osd.RPCTokens;
+
+/**
+ *
+ * @author bjko
+ */
+public class OSDClient extends RPCClient {
+
+ /** Creates a new instance of OSDClient */
+ public OSDClient() throws IOException {
+ super();
+ }
+
+ public OSDClient(MultiSpeedy sharedSpeedy) throws IOException {
+ super(sharedSpeedy);
+ }
+
+ public OSDClient(MultiSpeedy sharedSpeedy, int timeout) throws IOException {
+ super(sharedSpeedy, timeout);
+ }
+
+ public OSDClient(int timeout, SSLOptions sslOptions) throws IOException {
+ super(timeout, sslOptions);
+ }
+
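+ // Most operations below pass the MRC-issued capability and the file's
+ // X-Locations list as HTTP headers; request bodies, where present, carry
+ // binary object data or JSON-encoded parameters.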
+ public RPCResponse get(InetSocketAddress osd, Locations loc, Capability cap, String file)
+ throws IOException, JSONException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+
+ return send(osd, file, null, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.GET_TOKEN);
+ }
+
+ /**
+ * performs a GET of a range of bytes on an OSD
+ *
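+ * The requested range is given relative to the object, e.g. firstByte=0 and
+ * lastByte=65535 request the first 64 KB of the object.
+ *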
+ * @param loc
+ * Location of the files.
+ * @param cap
+ * Capability of the request
+ * @param file
+ * File to use
+ * @param objectNumber
+ * Number of the object to use
+ * @param firstByte
+ * Offset relative to the object of the first requested byte
+ * @param lastByte
+ * Offset relative to the object of the last requested byte
+ * @return The response of the OSD
+ */
+ public RPCResponse get(InetSocketAddress osd, Locations loc, Capability cap, String file,
+ long objectNumber, long firstByte, long lastByte) throws IOException, JSONException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+ headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber));
+ headers.addHeader(HTTPHeaders.HDR_CONTENT_RANGE, "bytes " + Long.toString(firstByte) + "-"
+ + Long.toString(lastByte) + "/*");
+
+ return send(osd, file, null, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.GET_TOKEN);
+ }
+
+ /**
+ * performs a GET of an entire object on an OSD, attaching a client lease
+ *
+ * @param loc
+ * Location of the files.
+ * @param cap
+ * Capability of the request
+ * @param file
+ * File to use
+ * @param objectNumber
+ * Number of the object to use
+ * @param lease
+ * the client lease to attach to the request (its expiry is sent in a header)
+ * @return The response of the OSD
+ */
+ public RPCResponse get(InetSocketAddress osd, Locations loc, Capability cap, String file,
+ long objectNumber, ClientLease lease) throws IOException, JSONException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+ headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber));
+ headers.addHeader(HTTPHeaders.HDR_XLEASETO, Long.toString(lease.getExpires()));
+
+ return send(osd, file, null, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.GET_TOKEN);
+ }
+
+ /**
+ * performs a GET for an entire object on an OSD
+ *
+ * @param loc
+ * Location of the files.
+ * @param cap
+ * Capability of the request
+ * @param file
+ * File to use
+ * @param objectNumber
+ * Number of the object to use
+ * @return The response of the OSD
+ */
+ public RPCResponse get(InetSocketAddress osd, Locations loc, Capability cap, String file,
+ long objectNumber) throws IOException, JSONException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+ headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber));
+
+ return send(osd, file, null, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.GET_TOKEN);
+ }
+
+ /**
+ * It requests to the OSD to perform a PUT of a range of bytes
+ *
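+ * The range written starts at firstByte and covers data.capacity() bytes,
+ * i.e. bytes firstByte through firstByte + data.capacity() - 1 of the object.
+ *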
+ * @param loc
+ * Location of the files.
+ * @param cap
+ * Capability of the request
+ * @param file
+ * File to use
+ * @param objectNumber
+ * Number of the object to use
+ * @param firstByte
+ * Offset relative to the object of the first byte to write
+ * @param data
+ * Data to write
+ * @return The response of the OSD
+ */
+ public RPCResponse put(InetSocketAddress osd, Locations loc, Capability cap, String file,
+ long objectNumber, long firstByte, ReusableBuffer data) throws IOException, JSONException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+ headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber));
+ headers.addHeader(HTTPHeaders.HDR_CONTENT_RANGE, "bytes " + Long.toString(firstByte) + "-"
+ + Long.toString(firstByte + data.capacity() - 1) + "/*");
+
+ return send(osd, file, data, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.PUT_TOKEN);
+ }
+
+ /**
+ * It requests to the OSD to perform a PUT of a range of bytes with the X-Force-Increment header set
+ *
+ * @param loc
+ * Location of the files.
+ * @param cap
+ * Capability of the request
+ * @param file
+ * File to use
+ * @param objectNumber
+ * Number of the object to use
+ * @param firstByte
+ * Offset relative to the object of the first byte to write
+ * @param data
+ * Data to write
+ * @return The response of the OSD
+ */
+ public RPCResponse putWithForcedIncrement(InetSocketAddress osd, Locations loc, Capability cap,
+ String file, long objectNumber, long firstByte, ReusableBuffer data) throws IOException,
+ JSONException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+ headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber));
+ headers.addHeader("X-Force-Increment", "yes");
+ headers.addHeader(HTTPHeaders.HDR_CONTENT_RANGE, "bytes " + Long.toString(firstByte) + "-"
+ + Long.toString(firstByte + data.capacity() - 1) + "/*");
+
+ return send(osd, file, data, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.PUT_TOKEN);
+ }
+
+ /**
+ * It requests to the OSD to perform a PUT of a whole object
+ *
+ * @param loc
+ * Location of the files.
+ * @param cap
+ * Capability of the request
+ * @param file
+ * File to use
+ * @param objectNumber
+ * Number of the object to use
+ * @param data
+ * Data to write
+ * @return The response of the OSD
+ */
+ public RPCResponse put(InetSocketAddress osd, Locations loc, Capability cap, String file,
+ long objectNumber, ReusableBuffer data) throws IOException, JSONException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+ headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber));
+
+ return send(osd, file, data, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.PUT_TOKEN);
+ }
+
+ /**
+ * writes a full object onto an OSD, attaching a client lease
+ *
+ * @param loc
+ * Location of the files.
+ * @param cap
+ * Capability of the request
+ * @param file
+ * File to use
+ * @param objectNumber
+ * Number of the object to use
+ * @param data
+ * Data to write
+ * @return The response of the OSD
+ */
+ public RPCResponse put(InetSocketAddress osd, Locations loc, Capability cap, String file,
+ long objectNumber, ReusableBuffer data, ClientLease lease) throws IOException, JSONException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+ headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber));
+ headers.addHeader(HTTPHeaders.HDR_XLEASETO, Long.toString(lease.getExpires()));
+
+ return send(osd, file, data, headers, null, HTTPUtils.DATA_TYPE.BINARY, HTTPUtils.PUT_TOKEN);
+ }
+
+ /**
+ * It requests to the OSD to perform a DELETE of a file
+ *
+ * @param loc
+ * Location of the files. If null is given, only the data in the
+ * OSD will be deleted, otherwise, the deletion will be in every
+ * OSD in loc.
+ * @todo This specification will be changed for the new OSD
+ * @param cap
+ * Capability of the request
+ * @param file
+ * File to use
+ * @return The response of the OSD
+ */
+ public RPCResponse delete(InetSocketAddress osd, Locations loc, Capability cap, String file)
+ throws IOException, JSONException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+
+ // @todo In the new OSD, loc cannot be null. This has been changed to
+ // deleteReplica
+ if (loc != null)
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+
+ return send(osd, file, null, headers, null, HTTPUtils.DATA_TYPE.BINARY,
+ HTTPUtils.DELETE_TOKEN);
+ }
+
+ /**
+ * It requests to the OSD to perform a getFileSize of a file
+ *
+ * @param loc
+ * Location of the file.
+ * @param cap
+ * Capability of the request
+ * @param file
+ * File whose size is requested
+ * @return The response of the OSD
+ */
+ public RPCResponse globalMax(InetSocketAddress osd, Locations loc, Capability cap, String file)
+ throws IOException, JSONException {
+
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+ headers.addHeader(HTTPHeaders.HDR_XFILEID, file);
+
+ return sendRPC(osd, RPCTokens.fetchGlobalMaxToken, null, null, headers);
+ }
+
+ /**
+ * It requests to the OSD to perform a truncate of a file
+ *
+ * @param loc
+ * Location of the file.
+ * @param cap
+ * Capability of the request
+ * @param file
+ * File whose size is requested
+ * @param finalSize
+ * Size of the file after truncate
+ * @return The response of the OSD
+ */
+ public RPCResponse truncate(InetSocketAddress osd, Locations loc, Capability cap, String file,
+ long finalSize) throws JSONException, IOException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+
+ ReusableBuffer data = ReusableBuffer.wrap(JSONParser.toJSON(file, Long.valueOf(finalSize))
+ .getBytes(HTTPUtils.ENC_UTF8));
+
+ return sendRPC(osd, RPCTokens.truncateTOKEN, data, null, headers);
+ }
+
+ /**
+ * It requests to delete a certain replica from the specified location
+ *
+ * @param cap
+ * Capability of the request
+ * @param fileID
+ * The fileID of the replica to be deleted.
+ * @return The response of the OSD
+ */
+ public RPCResponse deleteReplica(InetSocketAddress osd, Capability cap, String fileID)
+ throws IOException, JSONException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XFILEID, fileID);
+
+ return sendRPC(osd, RPCTokens.deleteLocalTOKEN, null, null, headers);
+ }
+
+ public RPCResponse deleteReplica(InetSocketAddress osd, Capability cap, String fileID,
+ int timeout) throws IOException, JSONException {
+ RPCResponse r = deleteReplica(osd, cap, fileID);
+ r.getSpeedyRequest().timeout = timeout;
+ return r;
+ }
+
+ /**
+ * It requests to the OSD to truncate a local replica of a file to the given size
+ *
+ * @param cap
+ * Capability of the request
+ * @param file
+ * The fileID of the replica to be truncated.
+ * @param newFileSize
+ * Size of the file after truncate
+ * @return The response of the OSD
+ */
+ public RPCResponse truncateReplica(InetSocketAddress osd, Locations loc, Capability cap,
+ String file, Long newFileSize) throws IOException, JSONException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+
+ ReusableBuffer data = ReusableBuffer.wrap(JSONParser.toJSON(file, newFileSize).getBytes(
+ HTTPUtils.ENC_UTF8));
+
+ return sendRPC(osd, RPCTokens.truncateLocalTOKEN, data, null, headers);
+ }
+
+ public RPCResponse> getStatistics(InetSocketAddress osd) throws IOException, JSONException {
+ return sendRPC(osd, RPCTokens.getstatsTOKEN, null, null, new HTTPHeaders());
+ }
+
+ /**
+ * Checks consistency of a given object and returns the object's file size.
+ *
+ * @param osd
+ * the OSD holding the object
+ * @param loc
+ * the X-Locations List of the file
+ * @param cap
+ * the capability issued by the MRC
+ * @param file
+ * the file ID
+ * @param objectNumber
+ * the object number
+ * @return the response of the OSD, which contains the size of the object in
+ * bytes if no error has occurred
+ *
+ * @throws IOException
+ * @throws JSONException
+ */
+ public RPCResponse checkObject(InetSocketAddress osd, Locations loc, Capability cap,
+ String file, long objectNumber) throws IOException, JSONException {
+
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+ headers.addHeader(HTTPHeaders.HDR_XFILEID, file);
+ headers.addHeader(HTTPHeaders.HDR_XOBJECTNUMBER, Long.toString(objectNumber));
+
+ return sendRPC(osd, RPCTokens.checkObjectTOKEN, null, null, headers);
+ }
+
+ public RPCResponse> recordStageStats(InetSocketAddress osd, Boolean measureRqs, Boolean basicStats) throws IOException, JSONException {
+
+ ReusableBuffer data = ReusableBuffer.wrap(JSONParser.toJSON(measureRqs,basicStats).getBytes(
+ HTTPUtils.ENC_UTF8));
+
+ return sendRPC(osd, RPCTokens.recordRqDurationTOKEN, data, null, new HTTPHeaders());
+ }
+
+ /**
+ * Acquires or renews a client lease.
+ * @param osd the osd from which the lease is requested
+ * @param lease the lease object (must contain a lease id for renewal)
+ * @return a list with a JSON-encoded client lease and a timestamp (see XtreemFS protocol for details)
+ * @throws java.io.IOException
+ * @throws org.xtreemfs.foundation.json.JSONException
+ */
+ public RPCResponse>> acquireClientLease(InetSocketAddress osd, Locations loc, Capability cap, ClientLease lease) throws IOException, JSONException {
+
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+ headers.addHeader(HTTPHeaders.HDR_XFILEID, lease.getFileId());
+
+ List l = new ArrayList(1);
+ l.add(lease.encodeAsMap());
+
+ ReusableBuffer data = ReusableBuffer.wrap(JSONParser.writeJSON(l).getBytes(
+ HTTPUtils.ENC_UTF8));
+
+ return sendRPC(osd, RPCTokens.acquireLeaseTOKEN, data, null, headers);
+ }
+
+ public RPCResponse returnLease(InetSocketAddress osd, Locations loc, Capability cap, ClientLease lease) throws IOException, JSONException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XCAPABILITY, cap.toString());
+ headers.addHeader(HTTPHeaders.HDR_XLOCATIONS, loc.asJSONString().asString());
+ headers.addHeader(HTTPHeaders.HDR_XFILEID, lease.getFileId());
+
+ List l = new ArrayList(1);
+ l.add(lease.encodeAsMap());
+
+ ReusableBuffer data = ReusableBuffer.wrap(JSONParser.writeJSON(l).getBytes(
+ HTTPUtils.ENC_UTF8));
+
+ return sendRPC(osd, RPCTokens.returnLeaseTOKEN, data, null, headers);
+ }
+
+ /**
+ * TODO authenticate the user, to ensure that he has the right capabilities.
+ *
+ * @param osd
+ * @param authString
+ * @return a List of fileIDs from potential zombies.
+ * @throws IOException
+ * @throws JSONException
+ * @throws InterruptedException
+ */
+ public RPCResponse>>> cleanUp(InetSocketAddress osd, String authString) throws IOException, JSONException, InterruptedException {
+ return sendRPC(osd, RPCTokens.cleanUpTOKEN, null, authString, null);
+ }
+
+ /**
+ * If a file was identified as a potential zombie by the cleanUp operation,
+ * this command deletes the file with the given fileID from the given OSD.
+ *
+ * @param osd
+ * @param authString
+ * @param fileID
+ * @return
+ * @throws IOException
+ * @throws JSONException
+ * @throws InterruptedException
+ */
+ public RPCResponse cleanUpDelete(InetSocketAddress osd, String authString, String fileID) throws IOException, JSONException, InterruptedException {
+ HTTPHeaders headers = new HTTPHeaders();
+ headers.addHeader(HTTPHeaders.HDR_XFILEID, fileID);
+ return sendRPC(osd, RPCTokens.deleteLocalTOKEN, null, authString, headers);
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/clients/scrubber/AsyncScrubber.java b/servers/src/org/xtreemfs/common/clients/scrubber/AsyncScrubber.java
new file mode 100644
index 0000000000000000000000000000000000000000..ce826075c366694eb4fecc4644c4b3fea6dec62b
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/scrubber/AsyncScrubber.java
@@ -0,0 +1,547 @@
+package org.xtreemfs.common.clients.scrubber;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.xtreemfs.common.Capability;
+import org.xtreemfs.common.TimeSync;
+import org.xtreemfs.common.auth.NullAuthProvider;
+import org.xtreemfs.common.clients.RPCClient;
+import org.xtreemfs.common.clients.RPCResponse;
+import org.xtreemfs.common.clients.dir.DIRClient;
+import org.xtreemfs.common.clients.io.RandomAccessFile;
+import org.xtreemfs.common.clients.mrc.MRCClient;
+import org.xtreemfs.common.logging.Logging;
+import org.xtreemfs.common.util.OutputUtils;
+import org.xtreemfs.common.uuids.ServiceUUID;
+import org.xtreemfs.common.uuids.UUIDResolver;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.pinky.HTTPHeaders;
+import org.xtreemfs.foundation.pinky.SSLOptions;
+import org.xtreemfs.foundation.speedy.MultiSpeedy;
+import org.xtreemfs.utils.CLIParser;
+import org.xtreemfs.utils.DefaultDirConfig;
+import org.xtreemfs.utils.CLIParser.CliOption;
+
+public class AsyncScrubber {
+
+ private static final int DEFAULT_NUM_CONS = 10;
+
+ private static final int DEFAULT_NUM_FILES = 100;
+
+ private static final String DEFAULT_DIR_CONFIG = "/etc/xos/xtreemfs/default_dir";
+
+ private static String authString;
+
+ static {
+ try {
+ authString = NullAuthProvider.createAuthString("root", MRCClient
+ .generateStringList("root"));
+ } catch (JSONException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private AtomicInteger returnCode;
+
+ private long startTime;
+
+ private long lastStatusPrint;
+
+ private long lastBytes;
+
+ private int filesRead;
+
+ private int connectionsPerOSD;
+
+ private boolean updateFileSize;
+
+ private HashMap<ServiceUUID, OSDWorkQueue> osds;
+
+ private List<ScrubbedFile> currentFiles;
+
+ private VolumeWalker volumeWalker;
+
+ private MultiSpeedy speedy;
+
+ private MRCClient mrcClient;
+
+ private DIRClient dirClient;
+
+ private InetSocketAddress mrcAddress;
+
+ private Logger logger;
+
+ public static String latestScrubAttr = "scrubber.latestscrub";
+
+ private Map<OSDWorkQueue, Long> osdBytesMap = new HashMap<OSDWorkQueue, Long>();
+
+ private SSLOptions sslOptions;
+
+ /**
+ * @param sharedSpeedy
+ * @param dirAddress
+ * the address of the directory service
+ * @param mrcAddress
+ * the address of the mrc holding the volume
+ * @param volumeName
+ * @param updateFileSize
+ * true if the file size should be updated.
+ * @throws JSONException
+ * thrown by createAuthString
+ * @throws IOException
+ * thrown when creating a new MRCClient
+ * @throws Exception
+ * thrown when creating a new VolumeWalker
+ */
+ public AsyncScrubber(final MultiSpeedy sharedSpeedy, InetSocketAddress dirAddress,
+ InetSocketAddress mrcAddress, String volumeName, boolean updateFileSize,
+ int connectionsPerOSD, int noFilesToFetch, SSLOptions ssl) throws Exception {
+ this.connectionsPerOSD = connectionsPerOSD;
+ this.updateFileSize = updateFileSize;
+ this.speedy = sharedSpeedy;
+ this.mrcAddress = mrcAddress;
+
+ returnCode = new AtomicInteger(0);
+
+ assert(sharedSpeedy != null);
+ //dirClient = new DIRClient(sharedSpeedy, dirAddress);
+ TimeSync.initialize(dirClient, 100000, 50, authString);
+
+ mrcClient = new MRCClient(sharedSpeedy);
+ //UUIDResolver.shutdown();
+ //UUIDResolver.start(dirClient, 1000, 1000);
+
+ volumeWalker = new VolumeWalker(volumeName, mrcAddress, noFilesToFetch, authString, ssl);
+
+ currentFiles = Collections.synchronizedList(new ArrayList<ScrubbedFile>());
+ osds = new HashMap<ServiceUUID, OSDWorkQueue>();
+ logger = new Logger(null);
+
+ sslOptions = ssl;
+ }
+
+ public void shutdown() {
+ speedy.shutdown();
+ //dirClient.shutdown();
+ mrcClient.shutdown();
+ volumeWalker.shutdown();
+ for (OSDWorkQueue que : osds.values())
+ que.shutDown();
+
+ //UUIDResolver.shutdown();
+ //TimeSync.getInstance().shutdown();
+ }
+
+ public void waitForShutdown() {
+ mrcClient.shutdown();
+ }
+
+ /**
+ * Called by Main thread. Starts the scrubbing. Adds files to the OSD work
+ * queues until all files in the volume have been scrubbed.
+ *
+ * @throws Exception
+ */
+ public void start() throws Exception {
+ startTime = System.currentTimeMillis();
+
+ if (volumeWalker.hasNext()) {
+ fillOSDs();
+ }
+ while (currentFiles.size() > 0 || volumeWalker.hasNext()) {
+ fillOSDs();
+ }
+ logger.closeFileWriter();
+ System.out.println("Done. Total time: " + (System.currentTimeMillis() - startTime) / 1000
+ + " secs.");
+ }
+
+ /**
+ * Called by Main thread. Prints the total number of files/bytes read and
+ * the speed in KB/s. For each osd: prints the average connection speed in
+ * KB/s and the number of idle connections.
+ */
+ private void printStatus() {
+ long currentStatusPrint = System.currentTimeMillis();
+ try {
+ long bytes = 0;
+ String msg = "";
+ String osdDetails = "OSDs: ";
+
+ for (OSDWorkQueue osd : osds.values()) {
+
+ long osdBytes = osd.getTransferredBytes();
+
+ Long lastOSDBytes = osdBytesMap.get(osd);
+ if (lastOSDBytes == null)
+ lastOSDBytes = 0L;
+
+ osdDetails += osd.getOSDId()
+ + ": "
+ + OutputUtils.formatBytes((osdBytes - lastOSDBytes) * 1000
+ / (currentStatusPrint - lastStatusPrint)) + "/s, "
+ + osd.getNumberOfIdleConnections() + " idle; ";
+ bytes += osdBytes;
+ osdBytesMap.put(osd, osdBytes);
+ }
+
+ msg += "#files scrubbed: "
+ + filesRead
+ + " ("
+ + OutputUtils.formatBytes(bytes)
+ + "), avrg. throughput: "
+ + OutputUtils.formatBytes((bytes - lastBytes) * 1000
+ / (currentStatusPrint - lastStatusPrint)) + "/s, ";
+
+ System.out.println(msg + osdDetails + "\u001b[100D\u001b[A");
+
+ lastStatusPrint = currentStatusPrint;
+ lastBytes = bytes;
+
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * Called by Main thread. Retrieves the next path from the VolumeWalker and
+ * creates a RandomAccessFile of the file specified by the path. If no work
+ * queue exists for an OSD on which the file is stored, a new queue is
+ * created. The file is added to the currentFiles list.
+ *
+ * @throws Exception
+ * thrown by hasNext
+ */
+ void addNextFileToCurrentFiles() throws Exception {
+ if (volumeWalker.hasNext()) {
+ String path = volumeWalker.removeNextFile();
+ try {
+ RandomAccessFile file = new RandomAccessFile("r", mrcAddress, path, speedy,
+ authString);
+ for (ServiceUUID osdId : file.getOSDs()) {
+ // add new OSD to the scrubbing process
+ if (!osds.containsKey(osdId)) {
+ System.out.println("Adding OSD: " + osdId);
+ osds.put(osdId, new OSDWorkQueue(osdId, connectionsPerOSD,sslOptions));
+ }
+ }
+ currentFiles.add(new ScrubbedFile(this, file));
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ /**
+ * Called by Main thread. Fills the OSD work queues. If currentFiles does
+ * not contain enough requests for an OSD, new files are added to
+ * currentFiles.
+ *
+ * @throws Exception
+ * thrown by addNextFileToCurrentFiles
+ */
+ void fillOSDs() throws Exception {
+ if (System.currentTimeMillis() - lastStatusPrint > 1000)
+ printStatus();
+ if (osds.isEmpty()) {
+ addNextFileToCurrentFiles();
+ }
+ try {
+ for (OSDWorkQueue osd : osds.values()) {
+ fillQueue(osd);
+ if (osd.getNumberOfIdleConnections() > 0) {
+ for (int i = 0; i < 10; i++) {
+ if (volumeWalker.hasNext()) {
+ addNextFileToCurrentFiles();
+ } else
+ break;
+ }
+ fillQueue(osd);
+ }
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * Called by Main thread. Fills the osd work queue specified by the
+ * parameter osd.
+ *
+ * @param osd
+ */
+ void fillQueue(OSDWorkQueue osd) {
+ synchronized (currentFiles) {
+ for (ScrubbedFile file : currentFiles) {
+ while (true) { // get all possible reads for this osd
+ int objectNo = file.getRequestForOSD(osd);
+ if (objectNo == -1) {// no objects for this file
+ break;
+ }
+ if (!osd.readObjectAsync(file, objectNo))
+ return; // OSD has currently no idle connections,
+ // proceed with next OSD
+ }
+ }
+ }
+ }
+
+ /**
+ * @TODO setXattr last scrubber check: setXattr(file,
+ * "xtreemfs-scrubber-lastcheck", now()). Called by MultiSpeedy or Main
+ * thread. Invoked when all objects of the file have been successfully
+ * read. If the result differs from the expected file size, the
+ * inconsistency is logged and, if updateFileSize is set to true, the
+ * file size is updated. The file is removed from the list
+ * currentFiles.
+ * @param file
+ * @param result
+ * the number of bytes that have been read (the file size).
+ */
+ void fileFinished(ScrubbedFile file, long result, boolean isUnreadable) {
+ // fileFinished can be called multiple times for a file when there are
+ // outstanding requests. Cannot use remove(file) here, since it could
+ // result in scrubber.shutdown() (when currentFiles.isEmpty) being
+ // called before all updates are finished.
+ boolean firstCall = currentFiles.contains(file);
+
+ if (!firstCall) // do not output messages twice
+ return;
+
+ filesRead++;
+
+ if (isUnreadable) {
+ returnCode.set(2);
+ logger.logError(file.getPath() + ": could not read from OSD, skipping file.");
+ } else if (!(result == file.getExpectedFileSize())) {
+ returnCode.compareAndSet(0, 1);
+ if (updateFileSize) {
+ try {
+ updateFileSize(file.getPath(), result);
+ logger.logError(file.getPath()
+ + ": file size in MRC is outdated, updated from "
+ + file.getExpectedFileSize() + " to " + result);
+ } catch (Exception e) {
+ e.printStackTrace();
+ logger.logError(file.getPath() + ": Exception "
+ + "thrown while attempting to update file size");
+ }
+ } else {
+ logger.logError(file.getPath() + " file size in MRC is outdated, was: "
+ + file.getExpectedFileSize() + ", found: " + result);
+ }
+ }
+
+ try {
+ setLastScrubAttr(file.getPath());
+ } catch (Exception e) {
+ e.printStackTrace();
+ logger.logError(file.getPath() + ": Exception "
+ + "thrown while attempting set lastScrub attribute");
+ }
+ // must be invoked after the updates have been made, because it synchronizes
+ // with the main thread.
+ currentFiles.remove(file);
+ }
+
+ /**
+ *
+ * Called by MultiSpeedy or Main thread. Invoked when an object of the file
+ * has an invalid checksum.
+ *
+ * @param file
+ */
+
+ void foundInvalidChecksum(ScrubbedFile file, int objectNo) {
+ returnCode.set(2);
+ logger.logError(file.getPath() + ": object no. " + objectNo + " has invalid checksum.");
+ }
+
+ /**
+ * Called by Multispeedy or Main thread. Updates the file size of the file
+ * specified by path to newFileSize
+ *
+ * @param path
+ * @param newFileSize
+ * @throws Exception
+ */
+ public void updateFileSize(String path, long newFileSize) throws Exception {
+ Map<String, String> open = mrcClient.open(mrcAddress, path, "t", authString);
+ String xcap = open.get(HTTPHeaders.HDR_XCAPABILITY);
+ Capability capability = new Capability(xcap);
+ String newFileSizeHeader = "[" + newFileSize + "," + capability.getEpochNo() + "]";
+ mrcClient.updateFileSize(mrcAddress, xcap, newFileSizeHeader, authString);
+ }
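+
+ // Example (hypothetical values): for newFileSize = 1048576 and a capability
+ // epoch of 0, the header built above is the JSON array "[1048576,0]".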
+
+ void setLastScrubAttr(String path) throws Exception {
+ Map<String, Object> newXAttr = new HashMap<String, Object>();
+ long time = System.currentTimeMillis();
+ newXAttr.put(latestScrubAttr, String.valueOf(time));
+ mrcClient.setXAttrs(mrcAddress, path, newXAttr, authString);
+ volumeWalker.fileOrDirScrubbed(path, time);
+ }
+
+ public void enableLogfile(String filename) {
+ logger = new Logger(filename);
+ }
+
+ public int getReturnCode() {
+ return returnCode.get();
+ }
+
+ public static void main(String[] args) throws Exception {
+
+ Logging.start(Logging.LEVEL_WARN);
+
+ Map<String, CliOption> options = new HashMap<String, CliOption>();
+ List<String> arguments = new ArrayList<String>(1);
+ options.put("h", new CliOption(CliOption.OPTIONTYPE.SWITCH));
+ options.put("dir", new CliOption(CliOption.OPTIONTYPE.URL));
+ options.put("chk", new CliOption(CliOption.OPTIONTYPE.SWITCH));
+ options.put("cons", new CliOption(CliOption.OPTIONTYPE.NUMBER));
+ options.put("files", new CliOption(CliOption.OPTIONTYPE.NUMBER));
+ options.put("c", new CliOption(CliOption.OPTIONTYPE.STRING));
+ options.put("cp", new CliOption(CliOption.OPTIONTYPE.STRING));
+ options.put("t", new CliOption(CliOption.OPTIONTYPE.STRING));
+ options.put("tp", new CliOption(CliOption.OPTIONTYPE.STRING));
+ options.put("h", new CliOption(CliOption.OPTIONTYPE.SWITCH));
+
+ CLIParser.parseCLI(args, options, arguments);
+
+ if (arguments.size() != 1 || options.get("h").switchValue != null) {
+ usage();
+ return;
+ }
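+
+ // Example invocation (hypothetical host and volume name), using the options
+ // parsed above:
+ //   xtfs_scrub -dir http://localhost:32638 -chk -cons 4 -files 50 myVolume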
+
+ InetSocketAddress dirAddr = null;
+ boolean useSSL = false;
+ String serviceCredsFile = null;
+ String serviceCredsPass = null;
+ String trustedCAsFile = null;
+ String trustedCAsPass = null;
+
+ URL dirURL = options.get("dir").urlValue;
+
+ // parse security info if protocol is 'https'
+ if (dirURL != null && "https".equals(dirURL.getProtocol())) {
+ useSSL = true;
+ serviceCredsFile = options.get("c").stringValue;
+ serviceCredsPass = options.get("cp").stringValue;
+ trustedCAsFile = options.get("t").stringValue;
+ trustedCAsPass = options.get("tp").stringValue;
+ }
+
+ // read default settings
+ if (dirURL == null) {
+
+ DefaultDirConfig cfg = new DefaultDirConfig(DEFAULT_DIR_CONFIG);
+ cfg.read();
+
+ dirAddr = cfg.getDirectoryService();
+ useSSL = cfg.isSslEnabled();
+ serviceCredsFile = cfg.getServiceCredsFile();
+ serviceCredsPass = cfg.getServiceCredsPassphrase();
+ trustedCAsFile = cfg.getTrustedCertsFile();
+ trustedCAsPass = cfg.getTrustedCertsPassphrase();
+ } else
+ dirAddr = new InetSocketAddress(dirURL.getHost(), dirURL.getPort());
+
+ boolean checkOnly = options.get("chk").switchValue != null;
+
+ int noConnectionsPerOSD = DEFAULT_NUM_CONS;
+ if (options.get("cons").numValue != null)
+ noConnectionsPerOSD = options.get("cons").numValue.intValue();
+
+ int noFilesToFetch = DEFAULT_NUM_FILES;
+ if (options.get("files").numValue != null)
+ noFilesToFetch = options.get("files").numValue.intValue();
+
+ String volume = arguments.get(0);
+ boolean isVolUUID = false;
+ if (volume.startsWith("uuid:")) {
+ volume = volume.substring("uuid:".length());
+ isVolUUID = true;
+ }
+
+ SSLOptions sslOptions = useSSL ? new SSLOptions(serviceCredsFile, serviceCredsPass,
+ SSLOptions.PKCS12_CONTAINER,
+ trustedCAsFile, trustedCAsPass, SSLOptions.JKS_CONTAINER, false) : null;
+
+ // resolve volume MRC
+ Map query = RPCClient.generateMap(isVolUUID ? "uuid" : "name", volume);
+ DIRClient dirClient = new DIRClient(dirAddr, sslOptions, RPCClient.DEFAULT_TIMEOUT);
+ TimeSync.initialize(dirClient, 100000, 50, authString);
+
+ RPCResponse<Map<String, Map<String, Object>>> resp = dirClient.getEntities(query, RPCClient
+ .generateStringList("mrc", "name"), authString);
+ Map<String, Map<String, Object>> result = resp.get();
+ resp.freeBuffers();
+
+
+ if (result.isEmpty()) {
+ System.err.println("volume '" + arguments.get(0)
+ + "' could not be found at Directory Service '" + dirURL + "'");
+ System.exit(3);
+ }
+ Map<String, Object> volMap = result.values().iterator().next();
+ String mrc = (String) volMap.get("mrc");
+ volume = (String) volMap.get("name");
+
+ UUIDResolver.start(dirClient, 60*60, 10*60*60);
+
+ ServiceUUID mrcUUID = new ServiceUUID(mrc);
+ InetSocketAddress mrcAddress = mrcUUID.getAddress();
+
+ try {
+
+ MultiSpeedy speedy = new MultiSpeedy(sslOptions);
+ speedy.start();
+ AsyncScrubber scrubber = new AsyncScrubber(speedy, dirAddr, mrcAddress, volume,
+ !checkOnly, noConnectionsPerOSD, noFilesToFetch,sslOptions);
+
+ scrubber.start();
+ scrubber.shutdown();
+ System.exit(scrubber.getReturnCode());
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+
+ TimeSync.close();
+ UUIDResolver.shutdown();
+ dirClient.shutdown();
+
+ }
+
+ private static void usage() {
+ System.out.println("usage: xtfs_scrub [options] | uuid:");
+ System.out.println(" -dir uri directory service to use (e.g. 'http://localhost:32638')");
+ System.out
+ .println(" If no URI is specified, URI and security settings are taken from '"
+ + DEFAULT_DIR_CONFIG + "'");
+ System.out
+ .println(" In case of a secured URI ('https://...'), it is necessary to also specify SSL credentials:");
+ System.out
+ .println(" -c a PKCS#12 file containing user credentials");
+ System.out
+ .println(" -cp a pass phrase to decrypt the the user credentials file");
+ System.out
+ .println(" -t a PKCS#12 file containing a set of certificates from trusted CAs");
+ System.out
+ .println(" -tp a pass phrase to decrypt the trusted CAs file");
+ System.out
+ .println(" -chk check only (do not update file sizes on the MRC in case of inconsistencies)");
+ System.out.println(" -cons n number of connections per OSD (default=" + DEFAULT_NUM_CONS
+ + ")");
+ System.out.println(" -files n number of files to fetch at once from MRC (default="
+ + DEFAULT_NUM_FILES + ")");
+ System.out.println(" -h show usage info");
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/clients/scrubber/FileState.java b/servers/src/org/xtreemfs/common/clients/scrubber/FileState.java
new file mode 100644
index 0000000000000000000000000000000000000000..99402b2dda0d5e8145ae6d0806ca79c5929bebd2
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/scrubber/FileState.java
@@ -0,0 +1,117 @@
+package org.xtreemfs.common.clients.scrubber;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+
+
+public class FileState{
+
+ private enum ObjectState {TODO, READING, DONE;}
+
+ private List<ObjectState> objectStates;
+ private int NoOfObjectsEstimate = Integer.MAX_VALUE;
+ private long stripeSize;
+ private long fileSize = -1;
+ private boolean fileDone = false;
+
+ /**
+ *
+ * @param stripeSize
+ * @param size - the file size stored in the meta data
+ */
+ public FileState(long stripeSize, int size) {
+ this.stripeSize = stripeSize;
+ objectStates = Collections.synchronizedList(
+ new ArrayList<ObjectState>(size));
+ for(int i = 0; i < size; i++)
+ objectStates.add(ObjectState.TODO);
+ }
+
+ public boolean isFileDone() { return fileDone; }
+
+ /**
+ *
+ * @return returns the file size read if EOF has been read, otherwise -1 is returned.
+ */
+ public long getFileSize() {
+ if(fileDone && fileSize == -1)
+ fileSize = NoOfObjectsEstimate * stripeSize;
+ return fileSize;
+ }
+
+ /**
+ * Called by Multispeedy or Main thread.
+ * Changes the state of the object specified by the parameter objectNo to
+ * DONE. If the file is not marked as unreadable and EOF has been read and
+ * all objects are DONE, the file is marked as done.
+ */
+ public void incorporateReadResult(int objectNo, long bytesRead) {
+ assert objectStates.get(objectNo).equals(ObjectState.READING);
+ objectStates.set(objectNo, ObjectState.DONE);
+ if(bytesRead > 0) { // some data read
+ assert NoOfObjectsEstimate >= objectNo;
+ if(bytesRead != stripeSize) {
+ NoOfObjectsEstimate = objectNo;
+ fileSize = objectNo * stripeSize + bytesRead;
+ }
+ }
+ else { // read of an object after EOF
+ NoOfObjectsEstimate = Math.min(NoOfObjectsEstimate,objectNo);
+ }
+
+ // check if file is finished and update flag
+ if(NoOfObjectsEstimate != Integer.MAX_VALUE){
+ fileDone = true;
+ for(int i = 0; i <= NoOfObjectsEstimate; i++){
+ if(!objectStates.get(i).equals(ObjectState.DONE)){
+ fileDone = false;
+ break;
+ }
+ }
+ }
+ // if the object is the last object and the file is not
+ // done, the file was longer than expected, and another object
+ // is added to the object states.
+ else if((objectStates.size()-1 == objectNo)){
+ addObject();
+ }
+ }
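+
+ // Example (hypothetical numbers): with stripeSize = 131072, a read of object
+ // no. 3 that returns 100000 bytes marks it as the last object, so
+ // fileSize = 3 * 131072 + 100000 = 493216; the file counts as done once
+ // objects 0..3 are all in state DONE.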
+
+ /**
+ * Called by Multispeedy or Main thread.
+ * Sets the object state to READING
+ * @param objectNo
+ */
+ public void markObjectAsInFlight(int objectNo){
+ objectStates.set(objectNo, ObjectState.READING);
+ }
+
+ /**
+ *
+ * @param objectNo
+ * @return returns true if the object state of the object specified by
+ * objectNo is TODO, returns false otherwise.
+ */
+ public boolean isTodo(int objectNo) {
+ return objectStates.get(objectNo).equals(ObjectState.TODO);
+ }
+
+
+ private void addObject() {
+ objectStates.add(ObjectState.TODO);
+ }
+
+ public void setObjectState(int objectNo, ObjectState state){
+ objectStates.set(objectNo, state);
+ }
+
+ public List<ObjectState> getObjectStates(){
+ return objectStates;
+ }
+
+ public int getNoOfObjectStates(){
+ return objectStates.size();
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/clients/scrubber/Logger.java b/servers/src/org/xtreemfs/common/clients/scrubber/Logger.java
new file mode 100644
index 0000000000000000000000000000000000000000..a0512b4a86729030fd70b980b9b1e7d2b9bbe0f2
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/scrubber/Logger.java
@@ -0,0 +1,50 @@
+package org.xtreemfs.common.clients.scrubber;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Date;
+
+public class Logger extends PrintStream {
+
+ FileWriter writer;
+ public Logger(String logFileName) {
+ super(System.err);
+
+ if(logFileName != null) {
+ File logFile = new File(logFileName);
+ Date date = new Date();
+ try {
+ writer = new FileWriter(logFile);
+ writer.write("Date: " + date.toString() + "\n");
+ } catch (IOException e) {
+ System.err.println("Could not create log file.");
+ e.printStackTrace();
+ }
+ }
+ }
+
+ public void logError(String message) {
+ super.println(message);
+
+ try {
+ if(writer != null)
+ writer.write(message + "\n");
+ } catch (IOException e) {
+ System.err.println("Could not write to log!");
+ e.printStackTrace();
+ }
+ }
+
+ public void closeFileWriter(){
+ try {
+ if(writer != null)
+ writer.close();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/clients/scrubber/OSDWorkQueue.java b/servers/src/org/xtreemfs/common/clients/scrubber/OSDWorkQueue.java
new file mode 100644
index 0000000000000000000000000000000000000000..5eaca3af3d669bdffb0ad4061caf92699a46c22a
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/scrubber/OSDWorkQueue.java
@@ -0,0 +1,148 @@
+package org.xtreemfs.common.clients.scrubber;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.xtreemfs.common.TimeSync;
+import org.xtreemfs.common.buffer.ReusableBuffer;
+import org.xtreemfs.common.clients.RPCResponse;
+import org.xtreemfs.common.clients.RPCResponseListener;
+import org.xtreemfs.common.clients.osd.OSDClient;
+import org.xtreemfs.common.uuids.ServiceUUID;
+import org.xtreemfs.foundation.pinky.HTTPHeaders;
+import org.xtreemfs.foundation.pinky.SSLOptions;
+import org.xtreemfs.foundation.speedy.MultiSpeedy;
+
+/**
+ * The work queue for an OSD.
+ *
+ * Asynchronously sends off checkObject requests with one of its MultiSpeedies.
+ * Dispatches "finished" callback to respective file.
+ */
+public class OSDWorkQueue implements RPCResponseListener {
+
+ private AtomicLong transferredBytes = new AtomicLong();
+
+ private final ServiceUUID id;
+
+ private MultiSpeedy[] connections;
+
+ private OSDClient[] clients;
+
+ private boolean[] isIdle;
+
+ private AtomicInteger noOfIdleConnections;
+
+ private int noOfConnections;
+
+ public OSDWorkQueue(ServiceUUID id, int noConnections, SSLOptions ssl) throws IOException {
+ this.id = id;
+ connections = new MultiSpeedy[noConnections];
+ clients = new OSDClient[noConnections];
+ isIdle = new boolean[noConnections];
+ noOfIdleConnections = new AtomicInteger(0);
+ this.noOfConnections = noConnections;
+
+ for (int i = 0; i < noConnections; i++) {
+ MultiSpeedy speedy = null;
+ if (ssl == null)
+ speedy = new MultiSpeedy();
+ else
+ speedy = new MultiSpeedy(ssl);
+ speedy.start();
+ connections[i] = speedy;
+ clients[i] = new OSDClient(speedy);
+ isIdle[i] = true;
+ }
+ noOfIdleConnections.set(noConnections);
+ }
+
+ ServiceUUID getOSDId() {
+ return id;
+ }
+
+ int getNumberOfIdleConnections() {
+ return noOfIdleConnections.get();
+ }
+
+ int getTotalNumberOfConnections() {
+ return noOfConnections;
+ }
+
+ /**
+ * @return the total amount of bytes transferred by the OSD
+ */
+ public long getTransferredBytes() {
+ return transferredBytes.get();
+ }
+
+ /**
+ * Called by the Main thread. Reads the object given by the parameters file
+ * and objectNo asynchronously, if the file is not marked as unreadable and
+ * an idle connection exists.
+ *
+ * @return returns false if there is no idle connection.
+ */
+ public boolean readObjectAsync(ScrubbedFile file, int objectNo) {
+ for (int i = 0; i < clients.length; i++) {
+ if (isIdle[i]) {
+ // submit request
+ isIdle[i] = false;
+ noOfIdleConnections.decrementAndGet();
+ file.markObjectAsInFlight(objectNo);
+ file.readObjectAsync(clients[i], this, new ReadObjectContext(i, file, objectNo,
+ TimeSync.getLocalSystemTime()), objectNo);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Called by MultiSpeedy or Main thread.
+ *
+ */
+ public void responseAvailable(RPCResponse response) {
+ ReadObjectContext context = (ReadObjectContext) response.getAttachment();
+
+ // unsynchronized access to a shared variable!
+ // ok here, as at worst the connection stays unused for another round
+ isIdle[context.connectionNo] = true;
+ noOfIdleConnections.incrementAndGet(); // atomic!
+ try {
+ if (response.getStatusCode() == 200) {// no error occurred
+ ReusableBuffer data = response.getBody();
+ if (data != null) {// read was successful
+ data.flip();
+ String tmp = new String(data.array());
+ long bytesInObject = Long.valueOf(tmp);
+
+ transferredBytes.addAndGet(bytesInObject);
+ context.file.objectHasBeenRead(bytesInObject, context.objectNo);
+ } else
+ context.file.objectHasBeenRead(0, context.objectNo);
+
+ String header = response.getHeaders().getHeader(HTTPHeaders.HDR_XINVALIDCHECKSUM);
+ if (header != null && header.equalsIgnoreCase("true"))
+ context.file.objectHasInvalidChecksum(context.objectNo);
+ // throw new IOException("object " + context.objectNo +
+ // " has an invalid checksum");
+ // TODO: don't throw, but call a method as in objectHasNotBeenRead
+ } else {
+ context.file.couldNotReadObject(context.objectNo);
+ }
+
+ } catch (Exception e) {
+ context.file.couldNotReadObject(context.objectNo);
+ } finally {
+ response.freeBuffers();
+ }
+ }
+
+ void shutDown() {
+ for (int i = 0; i < connections.length; i++) {
+ connections[i].shutdown();
+ }
+ }
+}
\ No newline at end of file
diff --git a/servers/src/org/xtreemfs/common/clients/scrubber/ReadObjectContext.java b/servers/src/org/xtreemfs/common/clients/scrubber/ReadObjectContext.java
new file mode 100644
index 0000000000000000000000000000000000000000..9a4aaf4564af8cde1060c7212e7809f727a0db94
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/scrubber/ReadObjectContext.java
@@ -0,0 +1,20 @@
+package org.xtreemfs.common.clients.scrubber;
+
+/**
+ * Holds information to identify an object and the connection used for
+ * the read request after receiving a response from the osd.
+ */
+public class ReadObjectContext {
+ public long readStart;
+ public int connectionNo;
+ public int objectNo;
+ public ScrubbedFile file;
+
+ ReadObjectContext(int connectionNo, ScrubbedFile file, int objectNo,
+ long readStart) {
+ this.readStart = readStart;
+ this.connectionNo = connectionNo;
+ this.file = file;
+ this.objectNo = objectNo;
+ }
+}
\ No newline at end of file
diff --git a/servers/src/org/xtreemfs/common/clients/scrubber/ScrubbedFile.java b/servers/src/org/xtreemfs/common/clients/scrubber/ScrubbedFile.java
new file mode 100644
index 0000000000000000000000000000000000000000..2e2044ae8ce7145c9047cad821a4ac984a3c9f75
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/scrubber/ScrubbedFile.java
@@ -0,0 +1,125 @@
+package org.xtreemfs.common.clients.scrubber;
+
+import java.net.InetSocketAddress;
+import org.xtreemfs.common.clients.RPCResponse;
+import org.xtreemfs.common.clients.RPCResponseListener;
+import org.xtreemfs.common.clients.io.RandomAccessFile;
+import org.xtreemfs.common.clients.osd.OSDClient;
+import org.xtreemfs.common.uuids.ServiceUUID;
+
+/**
+ * A file that is currently being scrubbed. Tracks the per-object read state
+ * and reports results back to the AsyncScrubber.
+ */
+public class ScrubbedFile {
+ private AsyncScrubber scrubber;
+ private RandomAccessFile file;
+ private long expectedFileSize;
+ private FileState fileState;
+ private boolean stopIssuingRequests = false;
+
+ public ScrubbedFile(AsyncScrubber scrubber, RandomAccessFile file)
+ throws Exception {
+ this.scrubber = scrubber;
+ this.file = file;
+ this.expectedFileSize = file.length();
+ int expectedNoOfObject =
+ (int) (expectedFileSize / file.getStripeSize()) + 1;
+ fileState = new FileState(file.getStripeSize(), expectedNoOfObject);
+ }
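+
+ // Example (hypothetical numbers): expectedFileSize = 300000 and a stripe
+ // size of 131072 yield expectedNoOfObject = 300000 / 131072 + 1 = 3, i.e.
+ // objects 0, 1 and 2 are expected to exist.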
+
+ /**
+ * Called by Main thread.
+ * @return the number of the next unread object stored on the OSD specified
+ * by the parameter osd, or -1 if the file is marked as unreadable or
+ * has no unread objects stored on that OSD.
+ */
+ public int getRequestForOSD(OSDWorkQueue osd) {
+ // check if osd is in StripingPolicy list of osds for this file
+ if(stopIssuingRequests || !file.getOSDs().contains(osd.getOSDId()))
+ return -1;
+
+ // find next object which has not been read for the osd
+ for(int i = 0; i < fileState.getNoOfObjectStates(); i++){
+ if (fileState.isTodo(i) && file.getOSDId(i).equals(osd.getOSDId())){
+ return i;
+ }
+ }
+ return -1;
+ }
+ /**
+ * Called by Multispeedy or Main thread.
+ * Sets the object state to READING
+ */
+ public void markObjectAsInFlight(int objectNo){
+ fileState.markObjectAsInFlight(objectNo);
+ }
+
+ /**
+ * Called by Multispeedy or Main thread.
+ * Is only invoked after successfully reading the object
+ * @param bytesInObject the number of bytes read
+ * @param objectNo the object which has been read
+ */
+ public void objectHasBeenRead(long bytesInObject, int objectNo) {
+ fileState.incorporateReadResult(objectNo, bytesInObject);
+ if(fileState.isFileDone())
+ scrubber.fileFinished(this, fileState.getFileSize(),false);
+ }
+ /**
+ * Called by Multispeedy or Main thread.
+ * Marks the file as unreadable and removes the file from the scrubbers
+ * currentFiles list.
+ * @param objectNo
+ */
+ public void couldNotReadObject(int objectNo) {
+ stopIssuingRequests = true;
+ scrubber.fileFinished(this, fileState.getFileSize(),true);
+ }
+
+ /***
+ *
+ * @param objectNo
+ */
+
+ public void objectHasInvalidChecksum(int objectNo) {
+ scrubber.foundInvalidChecksum(this,objectNo);
+ }
+
+
+ /**
+ * @TODO logging... is it necessary here, or is it already done in responseAvailable?
+ * Called by Main thread.
+ * Sends a checkObject request to the OSD holding the object specified by objectNo.
+ * @param osdClient
+ * @param listener
+ * @param context
+ * @param objectNo
+ */
+ public RPCResponse readObjectAsync(OSDClient osdClient,
+ RPCResponseListener listener,
+ ReadObjectContext context,
+ int objectNo) {
+ RPCResponse response = null;
+ try {
+ ServiceUUID osd = file.getOSDId(objectNo);
+ InetSocketAddress current_osd_address = osd.getAddress();
+ response = osdClient.checkObject(current_osd_address, file.getLocations(),
+ file.getCapability(), file.getFileId(), objectNo);
+ response.setAttachment(context);
+ response.setResponseListener(listener);
+ }catch(Exception e){
+ e.printStackTrace();
+ // log "Exception thrown while attempting to read object no. ... of file ... "
+ }
+ return response;
+ }
+
+ public String getPath(){
+ return file.getPath();
+ }
+
+ public long getExpectedFileSize(){
+ return expectedFileSize;
+ }
+}
\ No newline at end of file
diff --git a/servers/src/org/xtreemfs/common/clients/scrubber/VolumeWalker.java b/servers/src/org/xtreemfs/common/clients/scrubber/VolumeWalker.java
new file mode 100644
index 0000000000000000000000000000000000000000..845447ba16dfdd33143d00b5d1d8c756d97d8145
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/clients/scrubber/VolumeWalker.java
@@ -0,0 +1,125 @@
+/**
+ *
+ */
+package org.xtreemfs.common.clients.scrubber;
+
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Map;
+
+import org.xtreemfs.common.clients.mrc.MRCClient;
+import org.xtreemfs.foundation.pinky.SSLOptions;
+
+public class VolumeWalker {
+
+ private InetSocketAddress mrcAddress;
+ private MRCClient mrcClient;
+ private String authString;
+ private LinkedList<String> files;
+ private LinkedList<String> dirs;
+ public HashMap<String, Integer> elementsInDir;
+ private int noFilesToFetch;
+
+ public VolumeWalker(String volumeName, InetSocketAddress mrcAddress,
+ int noFilesToFetch, String authString, SSLOptions ssl) throws Exception{
+ this.mrcAddress = mrcAddress;
+ if (ssl != null) {
+ mrcClient = new MRCClient(MRCClient.DEFAULT_TIMEOUT, ssl);
+ } else {
+ mrcClient = new MRCClient();
+ }
+ this.noFilesToFetch = noFilesToFetch;
+ this.authString = authString;
+ dirs = new LinkedList<String>();
+ dirs.add(volumeName);
+ files = new LinkedList<String>();
+ elementsInDir = new HashMap<String, Integer>();
+ }
+/**
+ * Adds files and directories from the volume to the lists files and dirs.
+ * The directories are traversed using depth first search.
+ * @throws Exception thrown by readDirAndStat
+ */
+ private void getMoreFiles() throws Exception {
+ while(!dirs.isEmpty() && files.size() < noFilesToFetch){
+ String dir = dirs.removeFirst();
+ Map<String, Map<String, Object>> dirsAndFiles =
+ mrcClient.readDirAndStat(mrcAddress, dir, authString);
+ if(dirsAndFiles.isEmpty()){
+ long latestScrub = System.currentTimeMillis();
+ setLatestScrubOfDir(dir, latestScrub);
+ fileOrDirScrubbed(dir, latestScrub);
+ }
+ else
+ elementsInDir.put(dir, dirsAndFiles.size());
+ for(String path : dirsAndFiles.keySet()){
+ String type = dirsAndFiles.get(path).get("objType").toString();
+ //if file
+ if(type.equals("1")){
+ files.add(dir + "/" + path);
+ }
+ //if directory
+ if(type.equals("2")){
+ dirs.add(dir + "/" + path);
+ }
+ }
+ }
+ }
+
+ /**
+ *
+ * @return true if the volume contains more files to scrub, false otherwise.
+ * @throws Exception thrown by getMoreFiles.
+ */
+ public boolean hasNext() throws Exception {
+
+ if(!files.isEmpty())
+ return true;
+ else if(dirs.isEmpty())
+ return false;
+ else{
+ getMoreFiles();
+ return hasNext();
+ }
+ }
+
+ public String removeNextFile(){
+ return files.removeLast();
+ }
+
+ public void setLatestScrubOfDir(String path, long time) throws Exception {
+ Map<String, Object> newXAttr = new HashMap<String, Object>();
+ newXAttr.put(AsyncScrubber.latestScrubAttr, time);
+ mrcClient.setXAttrs(mrcAddress, path, newXAttr, authString);
+ }
+ /**
+ * @TODO currently sets a directory's xattr to the largest time of its
+ * entries; it should use the minimum instead.
+ */
+ public void fileOrDirScrubbed(String path, long time) throws Exception {
+ String dir = getParentDir(path);
+ if(dir != null){
+ int noOfUnscrubbedElements = elementsInDir.get(dir)-1;
+ elementsInDir.put(dir, noOfUnscrubbedElements);
+ if(noOfUnscrubbedElements == 0){
+ setLatestScrubOfDir(dir, time);
+ fileOrDirScrubbed(dir, time);
+ }
+ }
+
+ }
+
+ public String getParentDir(String path) {
+ int lastIndex = path.lastIndexOf('/');
+ if(lastIndex != -1)
+ return path.substring(0, lastIndex);
+ else
+ return null;
+ }
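+
+ // Example: getParentDir("myVolume/dir/file.txt") returns "myVolume/dir";
+ // getParentDir("myVolume") returns null because the path contains no '/'.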
+ public void shutdown() {
+ mrcClient.shutdown();
+ mrcClient.waitForShutdown();
+ }
+
+}
\ No newline at end of file
diff --git a/servers/src/org/xtreemfs/common/config/Config.java b/servers/src/org/xtreemfs/common/config/Config.java
new file mode 100644
index 0000000000000000000000000000000000000000..57e95b57b721f52008971a470a2b001715de8086
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/config/Config.java
@@ -0,0 +1,118 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Christian Lorenz (ZIB), Björn Kolbeck (ZIB), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.config;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.util.Properties;
+
+/**
+ *
+ * @author bjko
+ */
+abstract public class Config {
+
+ protected final Properties props;
+
+ public Config() {
+ props = new Properties();
+ }
+
+ public Config(Properties prop) {
+ this.props = new Properties(prop);
+ }
+
+ /** Creates a new instance of Config */
+ public Config(String filename) throws IOException {
+ props = new Properties();
+ props.load(new FileInputStream(filename));
+ }
+
+ protected int readRequiredInt(String paramName) {
+ String tmp = props.getProperty(paramName);
+ if (tmp == null)
+ throw new RuntimeException("property '" + paramName
+ + "' is required but was not found");
+ try {
+ return Integer.parseInt(tmp.trim());
+ } catch (NumberFormatException ex) {
+ throw new RuntimeException("property '" + paramName
+ + "' is an integer but '" + tmp + "' is not a valid number");
+ }
+ }
+
+ protected String readRequiredString(String paramName) {
+ String tmp = props.getProperty(paramName);
+ if (tmp == null)
+ throw new RuntimeException("property '" + paramName
+ + "' is required but was not found");
+ return tmp.trim();
+ }
+
+ protected InetSocketAddress readRequiredInetAddr(String hostParam,
+ String portParam) {
+ String host = readRequiredString(hostParam);
+ int port = readRequiredInt(portParam);
+ InetSocketAddress isa = new InetSocketAddress(host, port);
+ return isa;
+ }
+
+ protected boolean readRequiredBoolean(String paramName) {
+ String tmp = props.getProperty(paramName);
+ if (tmp == null)
+ throw new RuntimeException("property '" + paramName
+ + "' is required but was not found");
+ return Boolean.parseBoolean(tmp.trim());
+ }
+
+ protected boolean readOptionalBoolean(String paramName, boolean defaultValue) {
+ String tmp = props.getProperty(paramName);
+ if (tmp == null)
+ return defaultValue;
+ else
+ return Boolean.parseBoolean(tmp.trim());
+ }
+
+ protected InetAddress readOptionalInetAddr(String paramName,
+ InetAddress defaultValue) throws UnknownHostException {
+ String tmp = props.getProperty(paramName);
+ if (tmp == null)
+ return defaultValue;
+ else
+ return InetAddress.getByName(tmp);
+ }
+
+ protected String readOptionalString(String paramName, String defaultValue) {
+ return props.getProperty(paramName, defaultValue);
+ }
+
+ public Properties getProps() {
+ return props;
+ }
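+
+ // Usage sketch (hypothetical file name): subclasses load a properties file
+ // via the constructor and call the read*() helpers, which throw a
+ // RuntimeException when a required key is missing or malformed, e.g.
+ //   ServiceConfig cfg = new ServiceConfig("/path/to/service.properties");
+ //   cfg.read();   // reads debug_level, listen.port, ssl.* etc.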
+
+}
diff --git a/servers/src/org/xtreemfs/common/config/ServiceConfig.java b/servers/src/org/xtreemfs/common/config/ServiceConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..1828ef4b28e4e20af05a3592a61c68fa0eb9a89e
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/config/ServiceConfig.java
@@ -0,0 +1,143 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Christian Lorenz (ZIB), Björn Kolbeck (ZIB), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.config;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.util.Properties;
+
+public class ServiceConfig extends Config {
+
+ protected int debugLevel;
+
+ protected int port;
+
+ protected InetAddress address;
+
+ protected boolean useSSL;
+
+ protected String serviceCredsFile;
+
+ protected String serviceCredsPassphrase;
+
+ protected String serviceCredsContainer;
+
+ protected String trustedCertsFile;
+
+ protected String trustedCertsPassphrase;
+
+ protected String trustedCertsContainer;
+
+ private String geoCoordinates;
+
+ public ServiceConfig() {
+ super();
+ }
+
+ public ServiceConfig(Properties prop) {
+ super(prop);
+ }
+
+ public ServiceConfig(String filename) throws IOException {
+ super(filename);
+ }
+
+ public void read() throws IOException {
+
+ this.debugLevel = this.readRequiredInt("debug_level");
+
+ this.port = this.readRequiredInt("listen.port");
+
+ this.address = this.readOptionalInetAddr("listen.address", null);
+
+ if(this.useSSL = this.readRequiredBoolean("ssl.enabled")){
+ this.serviceCredsFile = this.readRequiredString("ssl.service_creds");
+
+ this.serviceCredsPassphrase = this.readRequiredString("ssl.service_creds.pw");
+
+ this.serviceCredsContainer = this.readRequiredString("ssl.service_creds.container");
+
+ this.trustedCertsFile = this.readRequiredString("ssl.trusted_certs");
+
+ this.trustedCertsPassphrase = this.readRequiredString("ssl.trusted_certs.pw");
+
+ this.trustedCertsContainer = this.readRequiredString("ssl.trusted_certs.container");
+ }
+
+ this.geoCoordinates = this.readOptionalString("geographic_coordinates", "");
+
+
+
+ }
+
+ public int getDebugLevel() {
+ return this.debugLevel;
+ }
+
+ public int getPort() {
+ return this.port;
+ }
+
+ public InetAddress getAddress() {
+ return this.address;
+ }
+
+ public boolean isUsingSSL() {
+ return this.useSSL;
+ }
+
+ public String getServiceCredsContainer() {
+ return this.serviceCredsContainer;
+ }
+
+ public String getServiceCredsFile() {
+ return this.serviceCredsFile;
+ }
+
+ public String getServiceCredsPassphrase() {
+ return this.serviceCredsPassphrase;
+ }
+
+ public String getTrustedCertsContainer() {
+ return this.trustedCertsContainer;
+ }
+
+ public String getTrustedCertsFile() {
+ return this.trustedCertsFile;
+ }
+
+ public String getTrustedCertsPassphrase() {
+ return this.trustedCertsPassphrase;
+ }
+
+ public String getGeoCoordinates() {
+ return geoCoordinates;
+ }
+
+ public void setGeoCoordinates(String geoCoordinates) {
+ this.geoCoordinates = geoCoordinates;
+ }
+
+}
diff --git a/servers/src/org/xtreemfs/common/logging/Logging.java b/servers/src/org/xtreemfs/common/logging/Logging.java
new file mode 100644
index 0000000000000000000000000000000000000000..5fe78ee406070ca7b124bb0c086079f8d9f21355
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/logging/Logging.java
@@ -0,0 +1,191 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * AUTHORS: Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common.logging;
+
+/**
+ *
+ * @author bjko
+ */
+public class Logging {
+
+ protected static final char ABBREV_LEVEL_INFO = 'I';
+
+ protected static final char ABBREV_LEVEL_DEBUG = 'D';
+
+ protected static final char ABBREV_LEVEL_WARN = 'W';
+
+ protected static final char ABBREV_LEVEL_ERROR = 'E';
+
+ protected static final char ABBREV_LEVEL_TRACE = 'T';
+
+ public static final int LEVEL_ERROR = 0;
+
+ public static final int LEVEL_WARN = 1;
+
+ public static final int LEVEL_INFO = 2;
+
+ public static final int LEVEL_DEBUG = 3;
+
+ public static final int LEVEL_TRACE = 10;
+
+ public static final String FORMAT_PATTERN = "[ %c | %-20s | %-15s | %3d | %9s] %s";
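+
+ // Example of a line produced with this pattern (hypothetical values, column
+ // widths approximate):
+ //   [ D | AsyncScrubber        | main            |   1 |   0:00:05] starting scrub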
+
+ protected static Logging instance;
+
+ protected static boolean tracingEnabled = false;
+
+ private final int level;
+
+ private long startTime;
+
+ /**
+ * Creates a new instance of Logging
+ */
+ private Logging(int level) {
+
+ if (level < 0)
+ this.level = 0;
+ else
+ this.level = level;
+
+ instance = this;
+
+ if (this.level >= LEVEL_TRACE)
+ tracingEnabled = true;
+
+ startTime = System.currentTimeMillis();
+ }
+
+ public static void logMessage(int level, Object me, String msg) {
+ if (level <= instance.level) {
+ char levelName = getLevelName(level);
+ if (me == null) {
+ System.out.println(String.format(FORMAT_PATTERN, levelName, "-", Thread
+ .currentThread().getName(), Thread.currentThread().getId(), getTimeStamp(),
+ msg));
+ } else {
+ System.out.println(String.format(FORMAT_PATTERN, levelName, me.getClass()
+ .getSimpleName(), Thread.currentThread().getName(), Thread.currentThread()
+ .getId(), getTimeStamp(), msg));
+ }
+ }
+ }
+
+ public static void logMessage(int level, Object me, Throwable msg) {
+ if (level <= instance.level) {
+ char levelName = getLevelName(level);
+ if (me == null) {
+ System.out.println(String.format(FORMAT_PATTERN, levelName, "-", Thread
+ .currentThread().getName(), Thread.currentThread().getId(), getTimeStamp(),
+ msg.toString()));
+ for (StackTraceElement elem : msg.getStackTrace()) {
+ System.out.println(" ... "
+ + elem.toString());
+ }
+ if (msg.getCause() != null) {
+ System.out.println(String.format(FORMAT_PATTERN, levelName, "-", Thread
+ .currentThread().getName(), Thread.currentThread().getId(),
+ getTimeStamp(), "root cause: " + msg.getCause()));
+ for (StackTraceElement elem : msg.getCause().getStackTrace()) {
+ System.out.println(" ... "
+ + elem.toString());
+ }
+ }
+ } else {
+ System.out.println(String.format(FORMAT_PATTERN, levelName, me.getClass()
+ .getSimpleName(), Thread.currentThread().getName(), Thread.currentThread()
+ .getId(), getTimeStamp(), msg));
+ for (StackTraceElement elem : msg.getStackTrace()) {
+ System.out.println(" ... "
+ + elem.toString());
+ }
+ if (msg.getCause() != null) {
+ System.out.println(String.format(FORMAT_PATTERN, levelName, me.getClass(),
+ Thread.currentThread().getName(), Thread.currentThread().getId(),
+ getTimeStamp(), "root cause: " + msg.getCause()));
+ for (StackTraceElement elem : msg.getCause().getStackTrace()) {
+ System.out.println(" ... "
+ + elem.toString());
+ }
+ }
+ }
+ }
+ }
+
+ public static char getLevelName(int level) {
+ switch (level) {
+ case LEVEL_ERROR:
+ return ABBREV_LEVEL_ERROR;
+ case LEVEL_INFO:
+ return ABBREV_LEVEL_INFO;
+ case LEVEL_WARN:
+ return ABBREV_LEVEL_WARN;
+ case LEVEL_DEBUG:
+ return ABBREV_LEVEL_DEBUG;
+ case LEVEL_TRACE:
+ return ABBREV_LEVEL_TRACE;
+ default:
+ return '?';
+ }
+ }
+
+ public synchronized static void start(int level) {
+ if (instance == null) {
+ instance = new Logging(level);
+ }
+ }
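+
+ // Usage sketch: start() must be called once before logMessage(), otherwise
+ // 'instance' is null and logMessage() fails with a NullPointerException.
+ //   Logging.start(Logging.LEVEL_DEBUG);
+ //   Logging.logMessage(Logging.LEVEL_INFO, this, "service started");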
+
+ /*public static void setLevel(int level) {
+ if (instance != null)
+ instance.level = level;
+ }*/
+
+ public static boolean isDebug() {
+ if (instance == null)
+ return false;
+ else
+ return instance.level >= LEVEL_DEBUG;
+ }
+
+ public static boolean isInfo() {
+ if (instance == null)
+ return false;
+ else
+ return instance.level >= LEVEL_INFO;
+ }
+
+ public static boolean tracingEnabled() {
+ return tracingEnabled;
+ }
+
+ private static String getTimeStamp() {
+ long seconds = (System.currentTimeMillis() - instance.startTime) / 1000;
+ long hours = seconds / 3600;
+ long mins = (seconds % 3600) / 60;
+ long secs = seconds % 60;
+ return hours + ":" + (mins < 10 ? "0" : "") + mins + ":" + (secs < 10 ? "0" : "") + secs;
+ }
+
+}
diff --git a/servers/src/org/xtreemfs/common/logging/Utils.java b/servers/src/org/xtreemfs/common/logging/Utils.java
new file mode 100644
index 0000000000000000000000000000000000000000..84084553a9703ce5ad2104f0c59dd19c26b1c6df
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/logging/Utils.java
@@ -0,0 +1,55 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ * AUTHORS: Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common.logging;
+
+/**
+ *
+ * @author bjko
+ */
+public class Utils {
+
+ public static final char LEVEL_INFO = 'I';
+ public static final char LEVEL_DEBUG = 'D';
+ public static final char LEVEL_WARN = 'W';
+ public static final char LEVEL_ERROR = 'E';
+
+
+ public static void logMessage(char level, Object me, String msg) {
+ if (me == null) {
+ System.out.println(String.format("[ %c | %-20s | %3d ] %s",
+ level,"?",Thread.currentThread().getId(),
+ msg));
+ } else {
+ System.out.println(String.format("[ %c | %-20s | %3d ] %s",
+ level,me.getClass().getSimpleName(),Thread.currentThread().getId(),
+ msg));
+ }
+ }
+
+ /** Creates a new instance of Utils */
+ public Utils() {
+ }
+
+}
diff --git a/servers/src/org/xtreemfs/common/striping/Location.java b/servers/src/org/xtreemfs/common/striping/Location.java
new file mode 100644
index 0000000000000000000000000000000000000000..a0627c03d99f16766645b1724bdfb65716869588
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/striping/Location.java
@@ -0,0 +1,305 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and
+ Barcelona Supercomputing Center - Centro Nacional de Supercomputacion.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see <http://www.xtreemos.eu> for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB), Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
+ */
+
+package org.xtreemfs.common.striping;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.xtreemfs.common.uuids.ServiceUUID;
+import org.xtreemfs.common.uuids.UnknownUUIDException;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.json.JSONString;
+
+/**
+ * Models the location of one replica. Every instance of this class is an
+ * object-oriented representation of one replica.
+ *
+ * @author clorenz
+ */
+public class Location {
+ /**
+ * update policies
+ */
+ public static final String REPLICA_UPDATE_SYNC = "sync";
+ public static final String REPLICA_UPDATE_ONDEMAND = "lazy";
+
+ /**
+ * used update policy for this replica
+ */
+ private String replicaUpdatePolicy;
+
+ /**
+ * striping policy which is used for this replica
+ */
+ private StripingPolicy policy;
+ /**
+ * involved osds
+ */
+ private List<ServiceUUID> osdList;
+
+ /**
+ * It creates a new instance of Location
+ *
+ * @param sp
+ * Striping policy of the replica
+ * @param osds
+ * OSDs containing the pieces of the same replica
+ */
+ public Location(StripingPolicy sp, List<ServiceUUID> osds) {
+ if ((sp != null) && (osds != null)) {
+ if (sp.getWidth() == osds.size()) {
+ policy = sp;
+ osdList = osds;
+ } else
+ throw new IllegalArgumentException(
+ "The striping policy is for " + sp.getWidth()
+ + " OSDs but the list of OSDs contains "
+ + osds.size());
+ } else if (sp == null)
+ throw new IllegalArgumentException("The policy is null");
+ else
+ throw new IllegalArgumentException("The osdList is null");
+ }
+
+ /**
+ * It creates a new instance from a list containing the object
+ *
+ * @param listedObject
+ * The object contained in the general way (as the JSON parser
+ * gives us)
+ */
+ public Location(List<Object> listedObject) throws JSONException {
+ initLocation(listedObject);
+ }
+
+ /**
+ * Creates an instance of this class from a JSON representation
+ *
+ * @param plain
+ * JSON representation of an object of this class
+ */
+ public Location(JSONString plain) throws JSONException {
+ List<Object> parsed = (List<Object>) JSONParser.parseJSON(plain);
+
+ if (parsed == null)
+ throw new IllegalArgumentException(
+ "The location specification is null");
+ else {
+ initLocation(parsed);
+ }
+ }
+
+ /**
+ * Convenience method that initializes the Location
+ */
+ private void initLocation(List<Object> listedObject) throws JSONException {
+ if (listedObject.size() != 2)
+ throw new IllegalArgumentException("Incorrect list's length");
+
+ // It gets the striping policy
+ Map<String, Object> policyCandidate = (Map<String, Object>) listedObject
+ .get(0);
+ if (policyCandidate == null)
+ throw new IllegalArgumentException("The striping policy is null");
+
+ policy = StripingPolicy.readFromJSON(policyCandidate);
+
+ // It gets the OSD list
+ List<String> osdListCandidate = (List<String>) listedObject.get(1);
+
+ if (osdListCandidate == null)
+ throw new IllegalArgumentException("The list of replicas is null");
+ else if (osdListCandidate.size() != policy.getWidth())
+ throw new IllegalArgumentException(
+ "The number of replicas in the list is wrong");
+
+ osdList = new ArrayList<ServiceUUID>(osdListCandidate.size());
+ for (String osdUUID : osdListCandidate) {
+ osdList.add(new ServiceUUID(osdUUID));
+ }
+ }
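+
+ // Sketch of the JSON layout expected above (placeholder values; the key names
+ // inside the striping policy map are defined by StripingPolicy.readFromJSON):
+ //   [ { ...striping policy map... }, [ "<osd-uuid-1>", "<osd-uuid-2>" ] ]
+ // i.e. a two-element list: the policy map followed by one OSD UUID per
+ // stripe, with as many UUIDs as the policy's width.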
+
+ /**
+ * Provides the responsible OSD for this object.
+ *
+ * @param objectID
+ * @return
+ */
+ public ServiceUUID getOSDByObject(long objectID) {
+ return osdList.get(policy.getOSDByObject(objectID));
+ }
+
+ /**
+ * Provides the responsible OSD for this offset.
+ *
+ * @param offset
+ * @return
+ */
+ public ServiceUUID getOSDByOffset(long offset) {
+ return osdList.get(policy.getOSDByOffset(offset));
+ }
+
+ /**
+ * Provides the responsible OSD for this byte range. Returns a value only
+ * if the byte range is stored on a single OSD.
+ *
+ * @param firstByte
+ * @param lastByte
+ * @return null, if the byte-range covers multiple objects on different OSDs
+ */
+ public ServiceUUID getOSDByByteRange(long firstByte, long lastByte) {
+ List objectRange = policy.getObjects(firstByte, lastByte);
+ if (objectRange.size() > 1) {
+ // throw exception, because byte range covers multiple objects
+ // throw new
+ // NoSuchElementException("byte range covers multiple objects");
+ return null;
+ } else
+ return getOSDByObject(objectRange.get(0).objectNumber);
+ }
+
+ /**
+ * It provides the list of OSDs of the location
+ *
+ * @return The list of OSDs of the object
+ */
+ public List<ServiceUUID> getOSDs() {
+ return osdList;
+ }
+
+ /**
+ * Number of OSDs which contain data of this replica.
+ *
+ * @return
+ */
+ public int getWidth() {
+ return this.osdList.size();
+ }
+
+ /**
+ * Resolves the UUID of all OSDs
+ *
+ * @throws UnknownUUIDException
+ */
+ void resolve() throws UnknownUUIDException {
+ for (ServiceUUID uuid : osdList) {
+ uuid.resolve();
+ }
+ }
+
+ /**
+ * checks if this replica location belongs to the OSD
+ *
+ * @param uuid
+ * @return
+ */
+ public boolean containsOSD(ServiceUUID uuid) {
+ return osdList.contains(uuid);
+ }
+
+ /**
+ * It provides the striping policy of this object
+ *
+ * @return The striping policy of the object
+ */
+ public StripingPolicy getStripingPolicy() {
+ return policy;
+ }
+
+ /**
+ * It provides a listed representation of the object
+ *
+ * @return The representation of this object like a list suitable for JSON
+ */
+ public List<Object> asList() {
+ List<Object> returnValue = new ArrayList<Object>(2);
+ returnValue.add(policy.asMap());
+
+ List<String> osds = new ArrayList<String>(osdList.size());
+ for (ServiceUUID osd : osdList) {
+ osds.add(osd.toString());
+ }
+
+ returnValue.add(osds);
+
+ return returnValue;
+ }
+
+ /**
+ * It gives a JSON string which represents the object.
+ *
+ * @return The string representing the object
+ */
+ public JSONString asJSONString() throws JSONException {
+ return new JSONString(JSONParser.writeJSON(asList()));
+ }
+
+ /**
+ * Provides the used update policy.
+ */
+ public String getReplicaUpdatePolicy() {
+ return this.replicaUpdatePolicy;
+ }
+
+ /**
+ * @param replicaUpdatePolicy
+ * the replicaUpdatePolicy to set
+ */
+ public void setReplicaUpdatePolicy(String replicaUpdatePolicy) {
+ assert (replicaUpdatePolicy.equals(REPLICA_UPDATE_SYNC) || replicaUpdatePolicy
+ .equals(REPLICA_UPDATE_ONDEMAND));
+ this.replicaUpdatePolicy = replicaUpdatePolicy;
+ }
+
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if ((obj == null) || (obj.getClass() != this.getClass()))
+ return false;
+
+ Location other = (Location) obj;
+ return policy.equals(other.policy) && osdList.equals(other.osdList);
+ }
+
+ public int hashCode() {
+ return policy.hashCode() + osdList.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return osdList.toString() + " ; " + policy;
+ }
+
+ /*
+ * old code
+ */
+ public int indexOf(ServiceUUID osdId) {
+ return osdList.indexOf(osdId);
+ }
+
+}
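
A minimal usage sketch for the byte-range helpers above, assuming loc is a Location built over a RAID0 policy with a 64 kB stripe size and a width of 2 (the constructors are defined earlier in this file); the values are illustrative only:

    // 64 kB stripes: bytes 0..65535 belong to object 0, 65536..131071 to object 1, ...
    ServiceUUID osdA = loc.getOSDByByteRange(0, 100);        // whole range in object 0 -> OSD 0
    ServiceUUID osdB = loc.getOSDByByteRange(65000, 70000);  // spans objects 0 and 1 -> null
    ServiceUUID osdC = loc.getOSDByOffset(70000);            // object 1 -> OSD 1
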
diff --git a/servers/src/org/xtreemfs/common/striping/Locations.java b/servers/src/org/xtreemfs/common/striping/Locations.java
new file mode 100644
index 0000000000000000000000000000000000000000..fff6df73465855e37333d3c205a12c2a4875cae2
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/striping/Locations.java
@@ -0,0 +1,292 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and
+ Barcelona Supercomputing Center - Centro Nacional de Supercomputacion.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB), Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
+ */
+
+package org.xtreemfs.common.striping;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.xtreemfs.common.uuids.ServiceUUID;
+import org.xtreemfs.common.uuids.UnknownUUIDException;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.json.JSONString;
+
+/**
+ * Models the list of replica locations.
+ *
+ * @author clorenz
+ */
+public class Locations implements Iterable<Location> {
+ /**
+ * update policies
+ */
+ public static final String REPLICA_UPDATE_POLICY_SYNC = "sync";
+ public static final String REPLICA_UPDATE_POLICY_ONDEMAND = "lazy";
+
+ /**
+ * used update policy for all replicas
+ */
+ private String replicaUpdatePolicy;
+ /**
+ * defines how many replicas must be updated synchronously if the update
+ * policy is "sync"
+ */
+ private int replicaUpdatePolicySyncLevel;
+
+ /**
+ * version of the Locations-list
+ */
+ private final long version;
+
+ /**
+ * list of replicas
+ */
+ private final List<Location> replicas;
+
+ /**
+ * It creates an instance of Locations with an existing list. It uses the
+ * "ondemand"-policy as default.
+ *
+ * @param locations
+ * list of replica locations
+ */
+ public Locations(List<Location> locations) {
+ this(locations, 1, REPLICA_UPDATE_POLICY_ONDEMAND, 0);
+ }
+
+ /**
+ * It creates an instance of Locations with an existing list.
+ *
+ * @param locations
+ * list of replica locations
+ * @param version
+ * version of the locations list
+ * @param replicaUpdatePolicy
+ * policy which will be used
+ * @param replicaSyncLevel
+ * how many replicas must be updated synchronously if the update
+ * policy is "sync"
+ */
+ public Locations(List<Location> locations, long version,
+ String replicaUpdatePolicy, int replicaSyncLevel) {
+ if (locations == null)
+ throw new IllegalArgumentException("The list of replicas is null");
+ else if (locations.size() == 0)
+ throw new IllegalArgumentException(
+ "There is no replicas in the list");
+
+ this.replicas = locations;
+ this.version = version;
+ this.replicaUpdatePolicy = replicaUpdatePolicy;
+ this.replicaUpdatePolicySyncLevel = replicaSyncLevel;
+ }
+
+ /**
+ * Creates an instance of this class from a JSON representation
+ *
+ * @param plain
+ * JSON representation of an object of this class
+ */
+ public Locations(JSONString plain) throws JSONException {
+ List<Object> list = (List<Object>) JSONParser.parseJSON(plain);
+
+ if (list == null)
+ throw new IllegalArgumentException("The list of replicas is null");
+ if (list.size() < 2)
+ throw new IllegalArgumentException("Locations list is not valid.");
+
+ this.version = (Long) list.get(1);
+
+ List<List<Object>> xLocList = (List<List<Object>>) list.get(0);
+ this.replicas = new ArrayList<Location>(xLocList.size());
+ for (int i = 0; i < xLocList.size(); i++)
+ this.replicas.add(new Location((List<Object>) xLocList.get(i)));
+
+ if (list.size() >= 3)
+ parseRepUpdatePolicy((String) list.get(2));
+ else
+ replicaUpdatePolicy = REPLICA_UPDATE_POLICY_ONDEMAND;
+ }
+
+ /**
+ * parses the JSON-update-policy-string
+ *
+ * @param rp
+ */
+ private void parseRepUpdatePolicy(String rp) {
+ // parse the replication policy
+ int sepIndex = rp.indexOf(':');
+ if (sepIndex == -1) {
+ replicaUpdatePolicy = rp;
+ replicaUpdatePolicySyncLevel = replicas.size();
+ } else {
+ // TODO: conform to the specification: don't allow "lazy:5"
+ replicaUpdatePolicy = rp.substring(0, sepIndex);
+ replicaUpdatePolicySyncLevel = Integer.parseInt(rp
+ .substring(sepIndex + 1));
+ if (replicaUpdatePolicySyncLevel > replicas.size()) // all sync
+ replicaUpdatePolicySyncLevel = replicas.size();
+ }
+ }
+
+ /**
+ * Provides a list representation of this object.
+ *
+ * @return The list representation of this object
+ */
+ public List<Object> asList() {
+ List<Object> returnValue = new ArrayList<Object>(replicas.size());
+ for (Location loc : replicas) {
+ returnValue.add(loc.asList());
+ }
+ return returnValue;
+ }
+
+ /**
+ * It provides a JSONString representing the object
+ *
+ * @return The JSONString representation of the object
+ */
+ public JSONString asJSONString() throws JSONException {
+ List<Object> args = new ArrayList<Object>(3);
+ args.add(asList());
+ args.add(version);
+ if (replicaUpdatePolicy.equals(REPLICA_UPDATE_POLICY_SYNC)
+ && replicaUpdatePolicySyncLevel != replicas.size())
+ args.add(replicaUpdatePolicy + ":" + replicaUpdatePolicySyncLevel);
+ else
+ args.add(replicaUpdatePolicy);
+ return new JSONString(JSONParser.writeJSON(args));
+ }
+
+ /**
+ * Provides the replica location in which the given OSD takes part.
+ *
+ * @param osd
+ * OSD to locate
+ * @return The replica location containing the OSD, or null if there is none
+ */
+ public Location getLocation(ServiceUUID osd) {
+ for (Location loc : replicas) {
+ if (loc.containsOSD(osd))
+ return loc;
+ }
+ return null;
+ }
+
+ /**
+ * Provides the location at the specified index.
+ *
+ * @param index
+ * @return
+ */
+ public Location getLocation(int index) {
+ return replicas.get(index);
+ }
+
+ /**
+ * Provides a list of OSDs which contain replicas of the given object.
+ * NOTE: if the replicas use different striping policies, the same object number does not necessarily refer to the same data.
+ * @param objectID
+ * @return
+ */
+ public List<ServiceUUID> getOSDsByObject(long objectID) {
+ List<ServiceUUID> osds = new ArrayList<ServiceUUID>();
+ for(Location loc : replicas){
+ osds.add(loc.getOSDByObject(objectID));
+ }
+ return osds;
+ }
+
+ /**
+ * Resolves the UUIDs of all OSDs.
+ *
+ * @throws UnknownUUIDException
+ */
+ public void resolveAll() throws UnknownUUIDException {
+ for (Location loc : this.replicas) {
+ loc.resolve();
+ }
+ }
+
+ /**
+ * Provides the number of replicas.
+ *
+ * @return
+ */
+ public int getNumberOfReplicas() {
+ return replicas.size();
+ }
+
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if ((obj == null) || (obj.getClass() != this.getClass()))
+ return false;
+
+ Locations other = (Locations) obj;
+ return replicas.equals(other.replicas);
+ }
+
+ public int hashCode() {
+ return replicas.hashCode();
+ }
+
+ /**
+ * Provides the version of the locations-list.
+ */
+ public long getVersion() {
+ return version;
+ }
+
+ /**
+ * Provides the used update policy.
+ * @return
+ */
+ public String getReplicaUpdatePolicy() {
+ return replicaUpdatePolicy;
+ }
+
+ /**
+ * Provides how many replicas must be updated synchronously if the update policy is "sync".
+ * @return
+ */
+ public int getReplicaSyncLevel() {
+ return replicaUpdatePolicySyncLevel;
+ }
+
+ @Override
+ public Iterator iterator() {
+ return replicas.iterator();
+ }
+
+ @Override
+ public String toString() {
+ return "version: " + version + " ; " + replicas.toString() + " ; "
+ + replicaUpdatePolicy + ":" + replicaUpdatePolicySyncLevel;
+ }
+}
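
A minimal sketch of the JSON round trip implemented above, assuming locs is an existing Locations instance whose Location entries were built as in Location.java (JSONException handling omitted):

    JSONString json = locs.asJSONString();            // [[replica, ...], version, "sync:2" or "lazy"]
    Locations copy = new Locations(json);             // parses replicas, version and update policy
    long version = copy.getVersion();
    List<ServiceUUID> osds = copy.getOSDsByObject(5); // one OSD per replica for object 5
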
diff --git a/servers/src/org/xtreemfs/common/striping/RAID0.java b/servers/src/org/xtreemfs/common/striping/RAID0.java
new file mode 100644
index 0000000000000000000000000000000000000000..809bbdb8615310412a4fe9e666294bec9f1fc022
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/striping/RAID0.java
@@ -0,0 +1,202 @@
+/* Copyright (c) 2008 Barcelona Supercomputing Center - Centro Nacional
+ de Supercomputacion and Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.striping;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.json.JSONString;
+
+/**
+ * RAID 0
+ *
+ * @author clorenz
+ */
+public final class RAID0 extends StripingPolicy {
+ protected static final long KILOBYTE = 1024L;
+
+ /**
+ * used as key for JSON
+ */
+ public static final String POLICY_NAME = "RAID0";
+ /**
+ * used as key for JSON
+ */
+ protected static final String JSON_STRIPE_SIZE_TOKEN = "stripe-size";
+
+ protected final long stripeSize;
+
+ /**
+ * Creates a new instance of RAID0
+ *
+ * @param size
+ * Size of the stripes in kilobytes (1kB == 1024 bytes)
+ * @param width
+ * Number of OSDs where the file will be striped
+ */
+ public RAID0(long size, long width) {
+ super(width);
+
+ if (size <= 0)
+ throw new IllegalArgumentException("size must be > 0");
+
+ this.stripeSize = size;
+ }
+
+ @Override
+ public long getStripeSize(long objID) {
+ return this.stripeSize * KILOBYTE;
+ }
+
+ @Override
+ public long getRow(long objId) {
+ return objId / this.width;
+ }
+
+ /**
+ * Creates a RAID0 instance from a map of (name, value) pairs.
+ *
+ * @param translater
+ * map representation of a RAID0 object
+ * @return The object contained in the map
+ */
+ public static RAID0 readFromJSON(Map<String, Object> translater)
+ throws JSONException {
+ String name = (String) translater.get(JSON_STRIPING_POLICY_TOKEN);
+
+ if (name.equals(POLICY_NAME)) {
+ Object tmp = translater.get(JSON_STRIPE_SIZE_TOKEN);
+ if (tmp == null)
+ throw new JSONException(JSON_STRIPE_SIZE_TOKEN
+ + " argument is missing");
+ long size = (Long) tmp;
+
+ tmp = translater.get(JSON_WIDTH_TOKEN);
+ if (tmp == null)
+ throw new JSONException(JSON_WIDTH_TOKEN
+ + " argument is missing");
+ long width = (Long) tmp;
+
+ return new RAID0(size, width);
+ } else
+ throw new JSONException("[ E | RAID0 ] Bad striping policy name");
+ }
+
+ @Override
+ public JSONString asJSONString() throws JSONException {
+ return new JSONString(JSONParser.writeJSON(asMap()));
+ }
+
+ @Override
+ public Map<String, Object> asMap() {
+ Map<String, Object> returnValue = new HashMap<String, Object>();
+ returnValue.put(JSON_STRIPING_POLICY_TOKEN, POLICY_NAME);
+ returnValue.put(JSON_STRIPE_SIZE_TOKEN, stripeSize);
+ returnValue.put(JSON_WIDTH_TOKEN, getWidth());
+
+ return returnValue;
+ }
+
+ @Override
+ public String toString() {
+ return POLICY_NAME + " with " + this.width + " width and "
+ + this.stripeSize + "kb stripe-size";
+ }
+
+ @Override
+ public long getObject(long offset) {
+ return (offset / this.stripeSize) / KILOBYTE;
+ }
+
+ @Override
+ public long getFirstByte(long object) {
+ return object * this.stripeSize * KILOBYTE;
+ }
+
+ @Override
+ public long getLastByte(long object) {
+ return getFirstByte(object + 1) - 1;
+ }
+
+ @Override
+ public List<StripeInfo> getObjects(long firstByte, long lastByte) {
+ ArrayList<StripeInfo> list = new ArrayList<StripeInfo>(2);
+ long objectID, relativeFirstByte, relativeLastByte, osd;
+
+ // first object
+ objectID = getObject(firstByte);
+ relativeFirstByte = firstByte - getFirstByte(objectID);
+ relativeLastByte = ((relativeFirstByte + (lastByte - firstByte)) < stripeSize
+ * KILOBYTE) ? (relativeFirstByte + (lastByte - firstByte))
+ : (stripeSize * KILOBYTE - 1);
+ osd = getOSDByObject(objectID);
+
+ StripeInfo start = new StripeInfo(objectID, osd, relativeFirstByte,
+ relativeLastByte);
+ list.add(start);
+
+ // byte range spans multiple objects: also add the last object
+ if ((objectID = getObject(lastByte)) != start.objectNumber) {
+ relativeFirstByte = 0L;
+ relativeLastByte = lastByte - getFirstByte(objectID);
+ osd = getOSDByObject(objectID);
+
+ StripeInfo end = new StripeInfo(objectID, osd, relativeFirstByte,
+ relativeLastByte);
+ list.add(end);
+ }
+ return list;
+ }
+
+ @Override
+ public int getOSDByObject(long object) {
+ return (int) (object % this.width);
+ }
+
+ @Override
+ public int getOSDByOffset(long offset) {
+ return getOSDByObject(getObject(offset));
+ }
+
+ @Override
+ public String getPolicyName() {
+ return POLICY_NAME;
+ }
+
+ /*
+ * old code
+ */
+ @Override
+ public boolean isLocalObject(long objId, long osdNo) {
+ return objId % getWidth() == osdNo - 1;
+ }
+}
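
A short sketch of the offset/object arithmetic defined above; the numbers follow directly from the formulas (128 kB stripes on 4 OSDs):

    RAID0 sp = new RAID0(128, 4);            // 128 kB stripes, width 4
    long object = sp.getObject(300 * 1024);  // offset 300 kB lies in object 2
    int osd = sp.getOSDByObject(object);     // 2 % 4 -> OSD index 2
    long first = sp.getFirstByte(object);    // 262144
    long last = sp.getLastByte(object);      // 393215
    long row = sp.getRow(object);            // 0, i.e. the first row of stripes
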
diff --git a/servers/src/org/xtreemfs/common/striping/StripeInfo.java b/servers/src/org/xtreemfs/common/striping/StripeInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..723ce9e00e75e94a100cac6dd90a6b1c67e0e6b3
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/striping/StripeInfo.java
@@ -0,0 +1,86 @@
+/* Copyright (c) 2008 Barcelona Supercomputing Center - Centro Nacional
+ de Supercomputacion and Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jesús Malo (BSC), Björn Kolbeck (ZIB), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.striping;
+
+/**
+ * It encapsulates the information related to a stripe
+ *
+ * @author Jesús Malo (jmalo)
+ */
+public class StripeInfo {
+ public final Long objectNumber; // Relative object number
+ public final Long OSD; // Relative osd number
+ public final Long firstByte; // Relative first byte offset
+ public final Long lastByte; // Relative last byte offset
+
+ /**
+ * Creates a new instance of StripeInfo
+ *
+ * @param r
+ * Relative object number
+ * @param o
+ * Relative OSD position (it begins at 0)
+ * @param f
+ * Relative offset of the first byte of the stripe
+ * @param l
+ * Relative offset of the last byte of the stripe
+ * @pre (r >= 0) && (o >= 0) && (f >= 0) && (l >= 0)
+ */
+ public StripeInfo(long r, long o, long f, long l) {
+ assert ((r >= 0) && (o >= 0) && (f >= 0) && (l >= 0)) : "r = " + r
+ + ", o = " + o + ", f = " + f + ", l = " + l;
+
+ objectNumber = Long.valueOf(r);
+ OSD = Long.valueOf(o);
+ firstByte = Long.valueOf(f);
+ lastByte = Long.valueOf(l);
+ }
+
+ public boolean equals(Object obj) {
+
+ if (this == obj)
+ return true;
+
+ if ((obj == null) || (obj.getClass() != this.getClass()))
+ return false;
+
+ final StripeInfo toCompare = (StripeInfo) obj;
+ return objectNumber.equals(toCompare.objectNumber)
+ && OSD.equals(toCompare.OSD)
+ && firstByte.equals(toCompare.firstByte)
+ && lastByte.equals(toCompare.lastByte);
+ }
+
+ public int hashCode() {
+ return objectNumber.hashCode() + OSD.hashCode() + firstByte.hashCode()
+ + lastByte.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "StripeInfo: object "+objectNumber+" on osd "+OSD+" with bytes from "+firstByte+" to "+lastByte;
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/striping/StripingPolicy.java b/servers/src/org/xtreemfs/common/striping/StripingPolicy.java
new file mode 100644
index 0000000000000000000000000000000000000000..331f785cd9c00fc196aaaa6a65ab3e08395f0af2
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/striping/StripingPolicy.java
@@ -0,0 +1,255 @@
+/* Copyright (c) 2008 Barcelona Supercomputing Center - Centro Nacional
+ de Supercomputacion and Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Christian Lorenz (ZIB), Jesús Malo (BSC), Björn Kolbeck (ZIB), Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.common.striping;
+
+import java.util.List;
+import java.util.Map;
+
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.json.JSONString;
+
+/**
+ * It models the StripingPolicy.
+ *
+ * @author clorenz
+ */
+public abstract class StripingPolicy {
+ /**
+ * used as key for JSON
+ */
+ protected static final String JSON_STRIPING_POLICY_TOKEN = "policy";
+ /**
+ * used as key for JSON
+ */
+ protected static final String JSON_WIDTH_TOKEN = "width";
+
+ /**
+ * width (number of involved OSDs)
+ */
+ protected final long width;
+
+ /**
+ * Creates an object with the specific width.
+ *
+ * @param w
+ * Number of OSDs where this object will be related to
+ * @pre w > 0
+ */
+ protected StripingPolicy(long w) {
+ if (w <= 0)
+ throw new IllegalArgumentException("width must be > 0");
+
+ this.width = w;
+ }
+
+ /**
+ * It provides the number of OSDs of this striping policy object
+ *
+ * @return The number of OSDs. It will always be greater than zero.
+ */
+ public long getWidth() {
+ return width;
+ }
+
+ /**
+ * Returns the name of the policy.
+ *
+ * @return
+ */
+ public abstract String getPolicyName();
+
+ /**
+ * Returns the last objectID of the file with this filesize.
+ *
+ * @param fileSize
+ * filesize in bytes
+ * @return
+ */
+ public long calculateLastObject(long fileSize) {
+ return getObject(fileSize - 1);
+ }
+
+ /**
+ * Convenience method for getting the size of a stripe in bytes.
+ *
+ * @param objID
+ * Number of the object to get the stripe size for.
+ * @return The number of bytes of the stripe
+ */
+ public long getStripeSize(long objID) {
+ return getLastByte(objID) - getFirstByte(objID) + 1;
+ }
+
+ public int hashCode() {
+ return asMap().hashCode();
+ }
+
+ public boolean equals(Object obj) {
+
+ if (this == obj)
+ return true;
+ if ((obj == null) || (obj.getClass() != this.getClass()))
+ return false;
+
+ StripingPolicy other = (StripingPolicy) obj;
+
+ JSONString Iam, ItIs;
+ try {
+ Iam = asJSONString();
+ ItIs = other.asJSONString();
+ } catch (JSONException ex) {
+ throw new IllegalArgumentException();
+ }
+
+ return Iam.equals(ItIs);
+ }
+
+ /**
+ * It generates a mapped representation of this object
+ *
+ * @return The mapped representation of the object
+ */
+ public abstract Map<String, Object> asMap();
+
+ /**
+ * It gives a JSON string which represents the object.
+ *
+ * @return The string representing the object
+ */
+ public abstract JSONString asJSONString() throws JSONException;
+
+ /**
+ * It parses a string and recovers the striping policy contained in it
+ *
+ * @param plain
+ * The string containing a striping policy
+ * @return The object contained in "plain"
+ */
+ public static StripingPolicy readFromJSON(JSONString plain)
+ throws JSONException {
+ Map<String, Object> translater = (Map<String, Object>) JSONParser
+ .parseJSON(plain);
+
+ return readFromJSON(translater);
+ }
+
+ /**
+ * It parses a string and recovers the striping policy contained in it
+ *
+ * @param mappedObject
+ * The map containing a striping policy
+ * @return The object contained in mappedObject
+ */
+ public static StripingPolicy readFromJSON(Map<String, Object> mappedObject)
+ throws JSONException {
+ StripingPolicy translation;
+
+ if (mappedObject.containsKey(JSON_STRIPING_POLICY_TOKEN)) {
+ String selector = (String) mappedObject
+ .get(JSON_STRIPING_POLICY_TOKEN);
+
+ // add here additional striping policies
+ if (selector.equals(RAID0.POLICY_NAME))
+ translation = RAID0.readFromJSON(mappedObject);
+ else
+ throw new JSONException("Unknown striping policy: " + selector);
+ } else
+ throw new JSONException("There is no striping policy in the object");
+
+ return translation;
+ }
+
+ /**
+ * Provides the corresponding object for this byte-offset.
+ * @param offset
+ * @return
+ */
+ public abstract long getObject(long offset);
+
+ /**
+ * Provides the first byte of this object.
+ * @param object
+ * @return
+ */
+ public abstract long getFirstByte(long object);
+
+ /**
+ * Provides the last byte of this object.
+ * @param object
+ * @return
+ */
+ public abstract long getLastByte(long object);
+
+ /**
+ * Returns a list with information about the objects covered by this byte
+ * range. If the byte range covers only one object, the list contains a
+ * single entry; otherwise it contains two entries: the first for the object
+ * where the byte range starts and the second for the object where it ends.
+ */
+ public abstract List<StripeInfo> getObjects(long firstByte, long lastByte);
+
+ /**
+ * Provides the OSD position in this row for the given offset.
+ * @param offset
+ * @return
+ */
+ public abstract int getOSDByOffset(long offset);
+
+ /**
+ * Provides the OSD position in this row for the given object.
+ * @param object
+ * @return
+ */
+ public abstract int getOSDByObject(long object);
+
+ /**
+ * Provides the containing row of the object.
+ *
+ * @param absObjId
+ * @return
+ */
+ public abstract long getRow(long absObjId);
+
+ /**
+ * Returns information about where the data of the given object is located
+ * under another striping policy. Useful for re-striping.
+ * @param localObjectID objectID for THIS striping policy
+ * @param otherPolicy striping policy for which the data should be converted
+ * @return see method "getObjects(long firstByte, long lastByte)"
+ */
+ public List<StripeInfo> getOtherObjects(long localObjectID,
+ StripingPolicy otherPolicy) {
+ return otherPolicy.getObjects(this.getFirstByte(localObjectID), this
+ .getLastByte(localObjectID));
+ }
+
+ /*
+ * old code
+ */
+ public abstract boolean isLocalObject(long absObjId, long relOsdNo);
+}
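
A sketch of the readFromJSON factory above, which dispatches on the "policy" token; it assumes the JSON parser returns numeric values as Long, as the casts in RAID0.readFromJSON expect (JSONException handling omitted):

    JSONString plain = new JSONString(
        "{\"policy\":\"RAID0\",\"stripe-size\":64,\"width\":2}");
    StripingPolicy sp = StripingPolicy.readFromJSON(plain);  // yields a RAID0 instance
    long lastObj = sp.calculateLastObject(200 * 1024);       // 200 kB file -> last object 3
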
diff --git a/servers/src/org/xtreemfs/common/trace/Tracer.java b/servers/src/org/xtreemfs/common/trace/Tracer.java
new file mode 100644
index 0000000000000000000000000000000000000000..d0ea4602ea14fb89928c26cdfda5f1c37d0b8601
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/trace/Tracer.java
@@ -0,0 +1,104 @@
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package org.xtreemfs.common.trace;
+
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import org.xtreemfs.common.logging.Logging;
+
+/**
+ *
+ * @author bjko
+ */
+public class Tracer {
+
+ /**
+ * Set this to true to enable trace log file for all requests.
+ * @attention: MUST BE SET TO FALSE FOR NORMAL OPERATIONS.
+ */
+ public static final boolean COLLECT_TRACES = false;
+
+ public enum TraceEvent {
+
+ RECEIVED('>'),
+ RESPONSE_SENT('<'),
+ ERROR_SENT('E');
+
+ private final char eventType;
+ TraceEvent(char eventType) {
+ this.eventType = eventType;
+ }
+ public char getEventType() {
+ return this.eventType;
+ }
+ };
+
+ private static Tracer theInstance;
+
+
+ private final String traceFileName;
+
+ private final FileOutputStream fos;
+
+ private Tracer(String traceFileName) throws IOException {
+ this.traceFileName = traceFileName;
+ theInstance = this;
+
+ fos = new FileOutputStream(traceFileName,true);
+ Logging.logMessage(Logging.LEVEL_INFO, this,"TRACING IS ENABLED, THIS WILL CAUSE PERFORMANCE TO BE REDUCED!");
+ fos.write("#requestId;internal rq sequence no;event;component;message\n".getBytes());
+ }
+
+ /**
+ * Initialize the tracer.
+ * @param traceFileName file name to write trace data to (append mode).
+ * @throws java.io.IOException if the file cannot be opened
+ */
+ public static void initialize(String traceFileName) throws IOException {
+ new Tracer(traceFileName);
+ }
+
+ private void writeTraceRecord(String requestId, long intRqSeqNo, TraceEvent event, String component, String message) {
+ StringBuffer sb = new StringBuffer();
+
+ if (requestId != null)
+ sb.append(requestId);
+
+ sb.append(';');
+ sb.append(intRqSeqNo);
+ sb.append(';');
+ sb.append(event.getEventType());
+ sb.append(';');
+ if (component != null)
+ sb.append(component);
+ sb.append(';');
+ if (message != null)
+ sb.append(message);
+ sb.append("\n");
+ try {
+ fos.write(sb.toString().getBytes());
+ } catch (IOException ex) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this,ex);
+ }
+ }
+
+ public static void trace(String requestId, long intRqSeqNo, TraceEvent event, String component, String message) {
+ assert(theInstance != null): "Tracer not initialized";
+ theInstance.writeTraceRecord(requestId, intRqSeqNo, event, component, message);
+ }
+
+ @Override
+ public void finalize() {
+ try {
+ fos.close();
+ } catch (IOException ex) {
+ }
+ }
+
+
+}
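
A sketch of how the tracer above is meant to be used; COLLECT_TRACES is intended as a compile-time guard at the call sites, and the file name is illustrative (IOException handling omitted):

    Tracer.initialize("/tmp/xtreemfs-requests.trace");  // opens the trace file in append mode
    Tracer.trace("rq-0815", 1, Tracer.TraceEvent.RECEIVED, "OSD", "read object 2");
    Tracer.trace("rq-0815", 1, Tracer.TraceEvent.RESPONSE_SENT, "OSD", null);
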
diff --git a/servers/src/org/xtreemfs/common/util/FSUtils.java b/servers/src/org/xtreemfs/common/util/FSUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..2d8e7e30e0fc441bf1f6e02c86d3ca141c260906
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/util/FSUtils.java
@@ -0,0 +1,184 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and
+ Barcelona Supercomputing Center - Centro Nacional de Supercomputacion.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
+ */
+
+package org.xtreemfs.common.util;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.xtreemfs.common.logging.Logging;
+
+/**
+ * A class containing helper functions for working with the local file system.
+ *
+ * @author stender
+ */
+public class FSUtils {
+
+ /**
+ * Recursively deletes all contents of the given directory.
+ *
+ * @param file
+ * the directory to delete
+ */
+ public static void delTree(File file) {
+
+ if (!file.exists())
+ return;
+
+ // listFiles() returns null if 'file' is not a directory
+ File[] children = file.listFiles();
+ if (children != null) {
+ for (File f : children) {
+ if (f.isDirectory())
+ delTree(f);
+ else
+ f.delete();
+ }
+ }
+
+ file.delete();
+ }
+
+ /**
+ * Copies a whole directory tree to another directory.
+ *
+ * @param srcFile
+ * the source tree
+ * @param trgFile
+ * the target point where to copy the source tree
+ * @throws IOException
+ * if an I/O error occurs
+ */
+ public static void copyTree(File srcFile, File trgFile) throws IOException {
+
+ if (srcFile.isDirectory()) {
+
+ trgFile.mkdir();
+ for (File file : srcFile.listFiles())
+ copyTree(file, new File(trgFile, file.getName()));
+
+ } else {
+
+ FileChannel in = null, out = null;
+
+ try {
+ in = new FileInputStream(srcFile).getChannel();
+ out = new FileOutputStream(trgFile).getChannel();
+
+ long size = in.size();
+ MappedByteBuffer buf = in.map(FileChannel.MapMode.READ_ONLY, 0, size);
+
+ out.write(buf);
+
+ } finally {
+ if (in != null)
+ in.close();
+ if (out != null)
+ out.close();
+ }
+ }
+ }
+
+ /**
+ * Returns the free disk space on the partition storing the given directory.
+ *
+ * @param dir
+ * the directory stored in the partition
+ * @return the free disk space (for non-privileged users)
+ */
+ public static long getFreeSpace(String dir) {
+
+ BufferedReader buf = null;
+
+ // try to retrieve the file size via the native 'stat' command
+// try {
+// Process p = Runtime.getRuntime().exec("stat -f --format %a " + dir);
+// buf = new BufferedReader(new InputStreamReader(p.getInputStream()));
+// long result = Long.parseLong(buf.readLine()) * 4096;
+//
+// return result;
+//
+// } catch (Exception exc) {
+
+ // the 'stat'-based variant above has been disabled because the command
+ // is not available on all platforms; use the portable Java mechanism
+ return new File(dir).getUsableSpace();
+
+// } finally {
+// if (buf != null)
+// try {
+// buf.close();
+// } catch (IOException e) {
+// Logging.logMessage(Logging.LEVEL_ERROR, null, e);
+// }
+// }
+ }
+
+ public static File[] listRecursively(File rootDir, FileFilter filter) {
+ List<File> list = new ArrayList<File>();
+ listRecursively(rootDir, filter, list);
+ return list.toArray(new File[list.size()]);
+ }
+
+ private static void listRecursively(File rootDir, FileFilter filter, List<File> list) {
+
+ if (!rootDir.exists())
+ return;
+
+ // first, all files in subdirectories
+ File[] nestedDirs = rootDir.listFiles(new FileFilter() {
+ public boolean accept(File pathname) {
+ return pathname.isDirectory();
+ }
+ });
+
+ for (File dir : nestedDirs)
+ listRecursively(dir, filter, list);
+
+ for (File f : rootDir.listFiles(filter))
+ list.add(f);
+ }
+}
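
A sketch of the recursive listing helper above; the path and filter are illustrative:

    File[] logFiles = FSUtils.listRecursively(new File("/var/lib/xtreemfs"),
        new FileFilter() {
            public boolean accept(File pathname) {
                return pathname.getName().endsWith(".log");
            }
        });
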
diff --git a/servers/src/org/xtreemfs/common/util/NetUtils.java b/servers/src/org/xtreemfs/common/util/NetUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..0bec912a1f2d5d8e4c5f1964f08fd19ee2f0184f
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/util/NetUtils.java
@@ -0,0 +1,164 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB)
+ */
+package org.xtreemfs.common.util;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InterfaceAddress;
+import java.net.NetworkInterface;
+import java.util.Enumeration;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.xtreemfs.common.clients.RPCClient;
+
+public class NetUtils {
+
+ /**
+ * Returns a list of mappings for all reachable network endpoints.
+ *
+ * @param port
+ * the port to assign to the mappings
+ * @param protocol
+ * the protocol for the endpoint
+ * @return a list of mappings
+ * @throws IOException
+ */
+ public static List<Map<String, Object>> getReachableEndpoints(int port, String protocol)
+ throws IOException {
+
+ List<Map<String, Object>> endpoints = new LinkedList<Map<String, Object>>();
+
+ // first, try to find a globally reachable endpoint
+ Enumeration<NetworkInterface> ifcs = NetworkInterface.getNetworkInterfaces();
+ while (ifcs.hasMoreElements()) {
+
+ NetworkInterface ifc = ifcs.nextElement();
+ List<InterfaceAddress> addrs = ifc.getInterfaceAddresses();
+
+ // // prefer global addresses to local ones
+ // Collections.sort(addrs, new Comparator() {
+ // public int compare(InterfaceAddress o1, InterfaceAddress o2) {
+ // int o1global = o1.getAddress().isAnyLocalAddress() ? -1 : 1;
+ // int o2global = o2.getAddress().isAnyLocalAddress() ? -1 : 1;
+ // return o1global - o2global;
+ // }
+ //
+ // });
+
+ for (InterfaceAddress addr : addrs) {
+
+ InetAddress inetAddr = addr.getAddress();
+ if (inetAddr.isLoopbackAddress() || inetAddr.isLinkLocalAddress())
+ continue;
+
+ if (!(inetAddr.isLinkLocalAddress() || inetAddr.isSiteLocalAddress())) {
+ endpoints.add(RPCClient.generateMap("address", inetAddr.getHostAddress(),
+ "port", port, "protocol", protocol, "ttl", 3600, "match_network", "*"));
+ break;
+ }
+
+ // endpoints.add(RPCClient.generateMap("address",
+ // inetAddr.getHostAddress(), "port",
+ // port, "protocol", protocol, "ttl", 3600, "match_network",
+ // (inetAddr
+ // .isLinkLocalAddress()
+ // || inetAddr.isSiteLocalAddress() ? inetAddr.getHostAddress()
+ // + "/"
+ // + getSubnetMaskString(addr.getNetworkPrefixLength()) :
+ // "*")));
+ }
+
+ // stop searching for endpoints if an endpoint has been found
+ if (!endpoints.isEmpty())
+ break;
+ }
+
+ // if no globally reachable endpoints are available, pick the first
+ // locally reachable endpoint
+ if (endpoints.isEmpty()) {
+
+ // if there is no "public" IP, check all interfaces for a site-local
+ // address to use
+ ifcs = NetworkInterface.getNetworkInterfaces();
+ while (ifcs.hasMoreElements()) {
+
+ NetworkInterface ifc = ifcs.nextElement();
+ List<InterfaceAddress> addrs = ifc.getInterfaceAddresses();
+
+ for (InterfaceAddress addr : addrs) {
+
+ InetAddress inetAddr = addr.getAddress();
+
+ if (inetAddr.isSiteLocalAddress()) {
+ endpoints.add(RPCClient.generateMap("address", inetAddr.getHostAddress(),
+ "port", port, "protocol", protocol, "ttl", 3600, "match_network", "*"));
+ break;
+ }
+ }
+
+ if (!endpoints.isEmpty())
+ break;
+ }
+ }
+
+ return endpoints;
+
+ }
+
+ private static String getSubnetMaskString(short prefixLength) {
+
+ long addr = (0xFFFFFFFFL << (32 - prefixLength)) & 0xFFFFFFFFL;
+ StringBuffer sb = new StringBuffer();
+ for (int i = 3; i >= 0; i--) {
+ sb.append((addr & (0xFF << (i * 8))) >> (i * 8));
+ if (i > 0)
+ sb.append(".");
+ }
+
+ return sb.toString();
+ }
+
+ public static void main(String[] args) throws Exception {
+
+ System.out.println("all network interfaces: ");
+ Enumeration<NetworkInterface> ifcs = NetworkInterface.getNetworkInterfaces();
+ while (ifcs.hasMoreElements()) {
+ for (InterfaceAddress addr : ifcs.nextElement().getInterfaceAddresses()) {
+ InetAddress inetAddr = addr.getAddress();
+ System.out.println(inetAddr + ", loopback: " + inetAddr.isLoopbackAddress()
+ + ", linklocal: " + inetAddr.isLinkLocalAddress() + ", reachable: "
+ + inetAddr.isReachable(1000));
+ }
+ }
+
+ System.out.println("\nsuitable network interfaces: ");
+ for (Map<String, Object> endpoint : NetUtils.getReachableEndpoints(32640, "http"))
+ System.out.println(endpoint);
+ }
+
+}
diff --git a/servers/src/org/xtreemfs/common/util/OutputUtils.java b/servers/src/org/xtreemfs/common/util/OutputUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..bf6d17a8ffcc074e479b58d1093b671b04701b4b
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/util/OutputUtils.java
@@ -0,0 +1,141 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin and
+ Barcelona Supercomputing Center - Centro Nacional de Supercomputacion.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB), Björn Kolbeck (ZIB), Jesús Malo (BSC)
+ */
+
+package org.xtreemfs.common.util;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+
+/**
+ *
+ * @author bjko
+ */
+public final class OutputUtils {
+
+ public static final char[] trHex = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A',
+ 'B', 'C', 'D', 'E', 'F' };
+
+ public static final String byteToHexString(byte b) {
+ StringBuilder sb = new StringBuilder(2);
+ sb.append(trHex[((b >> 4) & 0x0F)]);
+ sb.append(trHex[(b & 0x0F)]);
+ return sb.toString();
+ }
+
+ public static final String byteArrayToHexString(byte[] array) {
+ StringBuilder sb = new StringBuilder(2 * array.length);
+ for (byte b : array) {
+ sb.append(trHex[((b >> 4) & 0x0F)]);
+ sb.append(trHex[(b & 0x0F)]);
+ }
+ return sb.toString();
+ }
+
+ public static final String byteArrayToFormattedHexString(byte[] array) {
+ StringBuilder sb = new StringBuilder(2 * array.length);
+ for (int i = 0; i < array.length; i++) {
+ sb.append(trHex[((array[i] >> 4) & 0x0F)]);
+ sb.append(trHex[(array[i] & 0x0F)]);
+ if (i % 4 == 3) {
+ if (i % 16 == 15)
+ sb.append("\n");
+ else
+ sb.append(" ");
+ }
+
+ }
+ return sb.toString();
+ }
+
+ public static final String stackTraceToString(Throwable th) {
+
+ PrintStream ps = null;
+ try {
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ ps = new PrintStream(out);
+ if (th != null)
+ th.printStackTrace(ps);
+
+ return new String(out.toByteArray());
+
+ } finally {
+ if (ps != null)
+ ps.close();
+ }
+
+ }
+
+ public static String formatBytes(long bytes) {
+
+ double kb = bytes / 1024.0;
+ double mb = bytes / (1024.0 * 1024.0);
+ double gb = bytes / (1024.0 * 1024.0 * 1024.0);
+ double tb = bytes / (1024.0 * 1024.0 * 1024.0 * 1024.0);
+
+ if (tb >= 1.0) {
+ return String.format("%.2f TB", tb);
+ } else if (gb >= 1.0) {
+ return String.format("%.2f GB", gb);
+ } else if (mb >= 1.0) {
+ return String.format("%.2f MB", mb);
+ } else if (kb >= 1.0) {
+ return String.format("%.2f kB", kb);
+ } else {
+ return bytes + " bytes";
+ }
+ }
+
+ public static String escapeToXML(String st) {
+ st = st.replace("&", "&");
+ st = st.replace("'", "'");
+ st = st.replace("<", "<");
+ st = st.replace(">", ">");
+ st = st.replace("\"", """);
+ return st;
+ }
+
+ public static String unescapeFromXML(String st) {
+ st = st.replace("&", "&");
+ st = st.replace("'", "'");
+ st = st.replace("<", "<");
+ st = st.replace(">", ">");
+ st = st.replace(""", "\"");
+ return st;
+ }
+
+ public static byte[] hexStringToByteArray(String hexString) {
+
+ assert (hexString.length() % 2 == 0);
+ byte[] bytes = new byte[hexString.length() / 2];
+
+ for (int i = 0; i < hexString.length(); i += 2) {
+ int b = Integer.parseInt(hexString.substring(i, i + 2), 16);
+ bytes[i / 2] = b >= 128? (byte) (b - 256): (byte) b;
+ }
+
+ return bytes;
+ }
+
+}
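
A few examples of the conversion helpers above (the expected results follow from the code):

    String hex = OutputUtils.byteArrayToHexString(new byte[] { (byte) 0xCA, (byte) 0xFE }); // "CAFE"
    byte[] raw = OutputUtils.hexStringToByteArray("CAFE");   // { -54, -2 }
    String size = OutputUtils.formatBytes(3 * 1024 * 1024);  // "3.00 MB"
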
diff --git a/servers/src/org/xtreemfs/common/uuids/ServiceUUID.java b/servers/src/org/xtreemfs/common/uuids/ServiceUUID.java
new file mode 100644
index 0000000000000000000000000000000000000000..b048da9dc234d409749f5e7a5723fb4ca0e84f0e
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/uuids/ServiceUUID.java
@@ -0,0 +1,175 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common.uuids;
+
+import java.io.Serializable;
+import java.net.InetSocketAddress;
+import org.xtreemfs.common.TimeSync;
+import org.xtreemfs.common.buffer.ASCIIString;
+
+/**
+ * Encapsulates the UUID and InetSocketAddress of a service.
+ * @author bjko
+ */
+public final class ServiceUUID implements Serializable {
+
+ private final String uuid;
+
+ private InetSocketAddress address;
+
+ private String protocol;
+
+ private long validUntil;
+
+ private UUIDCacheEntry cacheEntry;
+
+ private final UUIDResolver nonSingleton;
+
+ /**
+ * Creates a new ServiceUUID.
+ * @param uuid the uuid string
+ */
+ public ServiceUUID(String uuid) {
+ this.uuid = uuid;
+ this.validUntil = 0;
+ this.nonSingleton = null;
+ }
+
+ /**
+ * Creates a new ServiceUUID with an individual UUIDResolver (rather than the global instance).
+ * @param uuid the uuid string
+ */
+ public ServiceUUID(String uuid, UUIDResolver nonSingleton) {
+ this.uuid = uuid;
+ this.validUntil = 0;
+ this.nonSingleton = nonSingleton;
+ }
+
+ /**
+ * Creates a new ServiceUUID.
+ * @param uuid the uuid string.
+ */
+ public ServiceUUID(ASCIIString uuid) {
+ this(uuid.toString());
+
+ }
+
+ /**
+ * Resolves the UUID to an InetSocketAddress and protocol.
+ * @throws org.xtreemfs.common.uuids.UnknownUUIDException if the uuid cannot
+ * be resolved (not local, no mapping on DIR).
+ */
+ public void resolve() throws UnknownUUIDException {
+ updateMe();
+ }
+
+ /**
+ * Retrieves the InetSocketAddress for the service.
+ * @return the InetSocketAddress of the service
+ * @throws org.xtreemfs.common.uuids.UnknownUUIDException if the UUID cannot be resolved
+ */
+ public InetSocketAddress getAddress() throws UnknownUUIDException {
+ if (validUntil > TimeSync.getLocalSystemTime()) {
+ cacheEntry.setLastAccess(TimeSync.getLocalSystemTime());
+ } else {
+ updateMe();
+ }
+ return address;
+ }
+
+ /**
+ * Retrieves the protocol (http, https) for the service.
+ * @return the protocol of the service
+ * @throws org.xtreemfs.common.uuids.UnknownUUIDException if the UUID cannot be resolved
+ */
+ public String getProtocol() throws UnknownUUIDException {
+ if (validUntil > TimeSync.getLocalSystemTime()) {
+ cacheEntry.setLastAccess(TimeSync.getLocalSystemTime());
+ } else {
+ updateMe();
+ }
+ return protocol;
+ }
+
+ /**
+ * Returns the full URL of the service.
+ * @return the URL of the service
+ * @throws org.xtreemfs.common.uuids.UnknownUUIDException if the UUID cannot be resolved
+ */
+ public String toURL() throws UnknownUUIDException {
+ if (validUntil > TimeSync.getLocalSystemTime()) {
+ cacheEntry.setLastAccess(TimeSync.getLocalSystemTime());
+ } else {
+ updateMe();
+ }
+ return protocol+"://"+address.getHostName()+":"+address.getPort();
+ }
+
+ /**
+ * Returns details of the UUID mapping.
+ * @return details of the UUID mapping.
+ */
+ public String debugString() {
+ return this.uuid+" -> "+this.protocol+" "+this.address+" (still valid for "+((validUntil-TimeSync.getLocalSystemTime())/1000)+"s)";
+ }
+
+ /**
+ * Returns the UUID string.
+ * @return UUID string
+ */
+ public String toString() {
+ return this.uuid;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ try {
+ final ServiceUUID o = (ServiceUUID)other;
+ return this.uuid.equals(o.uuid);
+ } catch (ClassCastException ex) {
+ return false;
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ return uuid.hashCode();
+ }
+
+ /**
+ * updates the UUID mapping via UUIDResolver
+ * @throws org.xtreemfs.common.uuids.UnknownUUIDException
+ */
+ private void updateMe() throws UnknownUUIDException {
+ if (nonSingleton == null) {
+ cacheEntry = UUIDResolver.resolve(this.uuid);
+ } else {
+ cacheEntry = UUIDResolver.resolve(this.uuid, nonSingleton);
+ }
+ this.address = cacheEntry.getResolvedAddr();
+ this.validUntil = cacheEntry.getValidUntil();
+ this.protocol = cacheEntry.getProtocol();
+ }
+}
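
A sketch of resolving a service UUID with the class above, assuming the UUIDResolver (next file) is running and a mapping for the illustrative UUID "osd1" is registered:

    ServiceUUID osd = new ServiceUUID("osd1");
    osd.resolve();                              // throws UnknownUUIDException if no mapping exists
    InetSocketAddress addr = osd.getAddress();
    String url = osd.toURL();                   // e.g. "http://osd-host:32640"
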
diff --git a/servers/src/org/xtreemfs/common/uuids/UUIDCacheEntry.java b/servers/src/org/xtreemfs/common/uuids/UUIDCacheEntry.java
new file mode 100644
index 0000000000000000000000000000000000000000..ca023ada59b564ec84e6f6589b519c2e625341e4
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/uuids/UUIDCacheEntry.java
@@ -0,0 +1,103 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Björn Kolbeck (ZIB)
+ */
+package org.xtreemfs.common.uuids;
+
+import java.net.InetSocketAddress;
+import org.xtreemfs.common.TimeSync;
+
+/**
+ * Cache entry for the UUIDResolver.
+ * @author bjko
+ */
+class UUIDCacheEntry {
+
+ private String uuid;
+
+ private InetSocketAddress resolvedAddr;
+
+ private long validUntil;
+
+ private long lastAccess;
+
+ private String protocol;
+
+ private boolean sticky;
+
+ public UUIDCacheEntry(String uuid, String protocol, InetSocketAddress resolvedAddr,
+ long validUntil) {
+ this.uuid = uuid;
+ this.protocol = protocol;
+ this.resolvedAddr = resolvedAddr;
+ this.validUntil = validUntil;
+ this.lastAccess = TimeSync.getLocalSystemTime();
+ }
+
+ public String getUuid() {
+ return uuid;
+ }
+
+ public void setUuid(String uuid) {
+ this.uuid = uuid;
+ }
+
+ public InetSocketAddress getResolvedAddr() {
+ return resolvedAddr;
+ }
+
+ public void setResolvedAddr(InetSocketAddress resolvedAddr) {
+ this.resolvedAddr = resolvedAddr;
+ }
+
+ public long getValidUntil() {
+ return validUntil;
+ }
+
+ public void setValidUntil(long validUntil) {
+ this.validUntil = validUntil;
+ }
+
+ public long getLastAccess() {
+ return lastAccess;
+ }
+
+ public void setLastAccess(long lastAccess) {
+ this.lastAccess = lastAccess;
+ }
+
+ public String getProtocol() {
+ return protocol;
+ }
+
+ public void setProtocol(String protocol) {
+ this.protocol = protocol;
+ }
+
+ public boolean isSticky() {
+ return sticky;
+ }
+
+ public void setSticky(boolean sticky) {
+ this.sticky = sticky;
+ }
+}
diff --git a/servers/src/org/xtreemfs/common/uuids/UUIDResolver.java b/servers/src/org/xtreemfs/common/uuids/UUIDResolver.java
new file mode 100644
index 0000000000000000000000000000000000000000..141a8abbd8e10e641b6744b7c2edf67c8aee0111
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/uuids/UUIDResolver.java
@@ -0,0 +1,287 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common.uuids;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import org.xtreemfs.common.TimeSync;
+import org.xtreemfs.common.auth.NullAuthProvider;
+import org.xtreemfs.common.clients.RPCResponse;
+import org.xtreemfs.common.clients.dir.DIRClient;
+import org.xtreemfs.common.logging.Logging;
+import org.xtreemfs.common.util.NetUtils;
+import org.xtreemfs.foundation.json.JSONException;
+
+/**
+ * Resolves UUID to InetSocketAddress+Protocol mappings.
+ * @author bjko
+ */
+public final class UUIDResolver extends Thread {
+
+ Map<String, UUIDCacheEntry> cache;
+
+ protected transient boolean quit;
+
+ protected final DIRClient dir;
+
+ protected final String authString;
+
+ protected final List<String> myNetworks;
+
+ /**
+ * interval between two cache cleanups/renewals in milliseconds
+ */
+ public final int cacheCleanInterval;
+
+ public final int maxUnusedEntry;
+
+ protected static transient UUIDResolver theInstance;
+
+
+ protected UUIDResolver(DIRClient client, int cacheCleanInterval, int maxUnusedEntry,
+ boolean singleton) throws JSONException,IOException {
+
+ super("UUID Resolver");
+ setDaemon(true);
+
+ cache = new ConcurrentHashMap<String, UUIDCacheEntry>();
+ quit = false;
+ this.dir = client;
+ this.maxUnusedEntry = maxUnusedEntry;
+ this.cacheCleanInterval = cacheCleanInterval;
+
+ if (singleton) {
+ assert(theInstance == null);
+ theInstance = this;
+ }
+ authString = NullAuthProvider.createAuthString("services", "xtreemfs");
+ List<Map<String, Object>> ntwrks = NetUtils.getReachableEndpoints(0, "http");
+ myNetworks = new ArrayList<String>(ntwrks.size());
+ for (Map<String, Object> network : ntwrks) {
+ myNetworks.add((String)network.get("match_network"));
+ }
+ }
+
+ /**
+ * Starts the UUIDResolver thread.
+ * @param client a DIRClient used to resolve non-cached and non-local mappings
+ * @param cacheCleanInterval the interval between two cleanup/renewals of cache entries (in ms)
+ * @param maxUnusedEntry the duration for which to keep an unused entry (in ms, should be set to several tens of minutes)
+ * @throws org.xtreemfs.foundation.json.JSONException
+ * @throws java.io.IOException
+ */
+ public static synchronized void start(DIRClient client,
+ int cacheCleanInterval, int maxUnusedEntry) throws JSONException,IOException {
+ if (theInstance == null) {
+ new UUIDResolver(client, cacheCleanInterval, maxUnusedEntry,true);
+ theInstance.start();
+ Logging.logMessage(Logging.LEVEL_DEBUG, null,"started UUIDResolver");
+ } else {
+ Logging.logMessage(Logging.LEVEL_INFO, null,"UUIDResolver already running!");
+ }
+ }
+
+ public static synchronized UUIDResolver startNonSingelton(DIRClient client,
+ int cacheCleanInterval, int maxUnusedEntry) throws JSONException,IOException {
+ UUIDResolver tmp = new UUIDResolver(client, cacheCleanInterval, maxUnusedEntry,false);
+ tmp.start();
+ return tmp;
+ }
+
+ public static boolean isRunning() {
+ return theInstance != null;
+ }
+
+ static UUIDCacheEntry resolve(String uuid) throws UnknownUUIDException {
+ assert (theInstance != null);
+
+ UUIDCacheEntry entry = theInstance.cache.get(uuid);
+ //check if it is still valid
+ if ((entry != null) && (entry.getValidUntil() > TimeSync.getLocalSystemTime())) {
+ entry.setLastAccess(TimeSync.getLocalSystemTime());
+ return entry;
+ }
+ return theInstance.fetchUUID(uuid);
+ }
+
+ static UUIDCacheEntry resolve(String uuid, UUIDResolver nonSingleton) throws UnknownUUIDException {
+
+ UUIDCacheEntry entry = nonSingleton.cache.get(uuid);
+ //check if it is still valid
+ if ((entry != null) && (entry.getValidUntil() > TimeSync.getLocalSystemTime())) {
+ entry.setLastAccess(TimeSync.getLocalSystemTime());
+ return entry;
+ }
+ return nonSingleton.fetchUUID(uuid);
+ }
+
+ UUIDCacheEntry fetchUUID(String uuid) throws UnknownUUIDException {
+ if (dir == null)
+ throw new UnknownUUIDException("there is no mapping for "+uuid+". Attention: local mode enabled, no remote lookup possible.");
+ RPCResponse<Map<String, List<Object>>> r = null;
+ if (Logging.isDebug())
+ Logging.logMessage(Logging.LEVEL_DEBUG, this,"loading uuid mapping for "+uuid);
+ try {
+ r = dir.getAddressMapping(uuid, authString);
+ Logging.logMessage(Logging.LEVEL_DEBUG, this,"sent request to DIR");
+ r.waitForResponse(2000);
+ List<Object> l = r.get().get(uuid);
+ Logging.logMessage(Logging.LEVEL_DEBUG, this,"received response for "+uuid);
+ if ((l == null) || (l.size() == 1)) {
+ Logging.logMessage(Logging.LEVEL_DEBUG, this,"NO UUID MAPPING FOR: "+uuid);
+ throw new UnknownUUIDException("uuid "+uuid+" is not registered at directory server");
+ }
+ List<Map<String, Object>> mappings = (List<Map<String, Object>>) l.get(1);
+ for (int i = 0; i < mappings.size(); i++) {
+ Map addrMapping = mappings.get(i);
+ final String network = (String)addrMapping.get("match_network");
+ if (myNetworks.contains(network) || (network.equals("*"))) {
+ final String address = (String)addrMapping.get("address");
+ final String protocol = (String)addrMapping.get("protocol");
+ final int port = (int) ((Long)addrMapping.get("port")).intValue();
+ final long validUntil = TimeSync.getLocalSystemTime() + ((Long)addrMapping.get("ttl"))*1000;
+ final InetSocketAddress endpoint = new InetSocketAddress(address,port);
+ if (Logging.isDebug())
+ Logging.logMessage(Logging.LEVEL_DEBUG, this,"matching uuid record found for uuid "+uuid+" with network "+network);
+ UUIDCacheEntry e = new UUIDCacheEntry(uuid, protocol, endpoint, validUntil);
+ cache.put(uuid, e);
+ return e;
+ }
+ }
+ Logging.logMessage(Logging.LEVEL_DEBUG, this,"NO UUID MAPPING FOR: "+uuid);
+ throw new UnknownUUIDException("there is no matching entry for my network in the uuid address mapping. The service at "+uuid+
+ " is either not reachable from this machine or the mapping entry is misconfigured.");
+ } catch (InterruptedException ex) {
+ throw new UnknownUUIDException("cannot retrieve mapping from server due to IO error: "+ex);
+ } catch (IOException ex) {
+ throw new UnknownUUIDException("cannot retrieve mapping from server due to IO error: "+ex);
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ throw new UnknownUUIDException("cannot retrieve mapping from server due to invalid data sent by the server: "+ex);
+ } finally {
+ if (r != null)
+ r.freeBuffers();
+ }
+ }
+
+ @Override
+ public void run() {
+ List<UUIDCacheEntry> updates = new LinkedList<UUIDCacheEntry>();
+ do {
+ Iterator<UUIDCacheEntry> iter = cache.values().iterator();
+ while (iter.hasNext()) {
+ final UUIDCacheEntry entry = iter.next();
+ if (entry.isSticky())
+ continue;
+ if (entry.getLastAccess() + maxUnusedEntry < TimeSync.getLocalSystemTime()) {
+ //dump entry!
+ iter.remove();
+ Logging.logMessage(Logging.LEVEL_DEBUG, this,"removed entry from UUID cache: "+entry.getUuid());
+ } else {
+ //check if update is necessary
+ if (entry.getValidUntil() < TimeSync.getLocalSystemTime()+cacheCleanInterval) {
+ //renew entry...
+ try {
+ updates.add(fetchUUID(entry.getUuid()));
+ } catch (Exception ex) {
+ Logging.logMessage(Logging.LEVEL_WARN, this,"cannot refresh UIID mapping: "+ex);
+ iter.remove();
+ }
+ }
+ }
+
+ }
+ try {
+ sleep(cacheCleanInterval);
+ } catch (InterruptedException ex) {
+ }
+ } while (!quit);
+ }
+
+ /**
+ * Add a UUID which is mapped on localhost
+ * @param localUUID the UUID to map
+ * @param port the port to map the UUID to
+ * @param useSSL defines the protocol
+ */
+ public static void addLocalMapping(String localUUID, int port, boolean useSSL) {
+ assert(theInstance != null);
+
+ UUIDCacheEntry e = new UUIDCacheEntry(localUUID,
+ (useSSL ? "https" : "http"),
+ new InetSocketAddress("localhost",port),
+ Long.MAX_VALUE);
+
+ e.setSticky(true);
+ theInstance.cache.put(localUUID, e);
+ }
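+ /*
+ * Example (illustrative, not part of the original patch; the UUID and port
+ * below are made up): a service running in the same process can pin its own
+ * mapping so that lookups for its UUID never require a DIR round trip:
+ *
+ *   UUIDResolver.addLocalMapping("local-osd-uuid", 32640, false);
+ */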
+
+ public static void addLocalMapping(ServiceUUID uuid, int port, boolean useSSL) {
+ addLocalMapping(uuid.toString(), port, useSSL);
+ }
+
+ public static void shutdown(UUIDResolver nonSingleton) {
+ nonSingleton.quit = true;
+ nonSingleton.interrupt();
+ }
+
+ public static void shutdown() {
+ if (theInstance != null) {
+ theInstance.quit = true;
+ theInstance.interrupt();
+ theInstance = null;
+ Logging.logMessage(Logging.LEVEL_DEBUG, null,"UUIDREsolver shut down");
+ } else {
+ Logging.logMessage(Logging.LEVEL_DEBUG, null,"UUIDREsolver was already shut down or is not running");
+ }
+ }
+
+ public static String getCache() {
+ StringBuilder sb = new StringBuilder();
+ for (UUIDCacheEntry e : theInstance.cache.values()) {
+ sb.append(e.getUuid());
+ sb.append(" -> ");
+ sb.append(e.getProtocol());
+ sb.append(" ");
+ sb.append(e.getResolvedAddr());
+ if (e.isSticky()) {
+ sb.append(" - STICKY");
+ } else {
+ sb.append(" - valid for ");
+ sb.append((e.getValidUntil() - TimeSync.getLocalSystemTime())/1000l);
+ sb.append("s");
+ }
+ sb.append("\n");
+ }
+ return sb.toString();
+ }
+
+}
diff --git a/servers/src/org/xtreemfs/common/uuids/UnknownUUIDException.java b/servers/src/org/xtreemfs/common/uuids/UnknownUUIDException.java
new file mode 100644
index 0000000000000000000000000000000000000000..0fddb31e54095b8016e8e260aefe2fcd9421754d
--- /dev/null
+++ b/servers/src/org/xtreemfs/common/uuids/UnknownUUIDException.java
@@ -0,0 +1,39 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Björn Kolbeck (ZIB)
+ */
+
+package org.xtreemfs.common.uuids;
+
+import java.io.IOException;
+
+/**
+ * Thrown when a UUID cannot be mapped to a service's InetSocketAddress and protocol.
+ * @author bjko
+ */
+public class UnknownUUIDException extends IOException {
+
+ public UnknownUUIDException(String message) {
+ super(message);
+ }
+
+}
diff --git a/servers/src/org/xtreemfs/dir/DIR.java b/servers/src/org/xtreemfs/dir/DIR.java
new file mode 100644
index 0000000000000000000000000000000000000000..673343be34a42a2ef7ae6082fe6f46078035bb5f
--- /dev/null
+++ b/servers/src/org/xtreemfs/dir/DIR.java
@@ -0,0 +1,91 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB)
+ */
+package org.xtreemfs.dir;
+
+import java.io.IOException;
+
+import org.xtreemfs.common.logging.Logging;
+
+/**
+ * This class can be used to start a new instance of the Directory Service.
+ *
+ * @author stender
+ *
+ */
+public class DIR {
+
+ /**
+ * @param args
+ * the command line arguments
+ */
+ public static void main(String[] args) {
+
+ String configFileName = "../config/dirconfig.properties";
+
+ if (args.length != 1) {
+ System.out.println("using default config file " + configFileName);
+ } else {
+ configFileName = args[0];
+ }
+
+ DIRConfig config = null;
+ try {
+ config = new DIRConfig(configFileName);
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ return;
+ }
+
+ Logging.start(config.getDebugLevel());
+
+ Logging.logMessage(Logging.LEVEL_INFO, null, "JAVA_HOME=" + System.getProperty("java.home"));
+
+ try {
+ final RequestController rq = new RequestController(config);
+ rq.startup();
+
+ Runtime.getRuntime().addShutdownHook(new Thread() {
+ @Override
+ public void run() {
+ try {
+ Logging.logMessage(Logging.LEVEL_INFO, this, "received shutdown signal!");
+ rq.shutdown();
+ Logging.logMessage(Logging.LEVEL_INFO, this, "DIR shotdown complete");
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ }
+ }
+ });
+ } catch (Exception ex) {
+ Logging.logMessage(Logging.LEVEL_ERROR, null, ex);
+ Logging.logMessage(Logging.LEVEL_DEBUG, null,
+ "System could not start up due to an exception. Aborted.");
+ System.exit(1);
+ }
+
+ }
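+ /*
+ * Example invocation (illustrative; class path and config location depend
+ * on the local installation):
+ *
+ *   java -cp <server classes> org.xtreemfs.dir.DIR /path/to/dirconfig.properties
+ *
+ * When no argument is given, the default ../config/dirconfig.properties is used.
+ */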
+
+}
diff --git a/servers/src/org/xtreemfs/dir/DIRConfig.java b/servers/src/org/xtreemfs/dir/DIRConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..677d1b6a736886c0c66c6387da66dd75df4ca9be
--- /dev/null
+++ b/servers/src/org/xtreemfs/dir/DIRConfig.java
@@ -0,0 +1,68 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.dir;
+
+import java.io.IOException;
+import java.util.Properties;
+
+import org.xtreemfs.common.config.ServiceConfig;
+
+/**
+ *
+ * @author bjko
+ */
+public class DIRConfig extends ServiceConfig {
+
+ private String dbDir;
+
+ private String authenticationProvider;
+
+ /** Creates a new instance of DIRConfig */
+ public DIRConfig(String filename) throws IOException {
+ super(filename);
+ read();
+ }
+
+ public DIRConfig(Properties prop) throws IOException {
+ super(prop);
+ read();
+ }
+
+ public void read() throws IOException {
+ super.read();
+
+ this.dbDir = this.readRequiredString("database.dir");
+ this.authenticationProvider = readRequiredString("authentication_provider");
+ }
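+ /*
+ * Usage sketch (illustrative, not part of the original patch; values and
+ * placeholders are made up): besides reading a properties file, a config
+ * can be assembled programmatically, e.g. for tests:
+ *
+ *   Properties p = new Properties();
+ *   p.setProperty("database.dir", "/tmp/xtreemfs-dir");
+ *   p.setProperty("authentication_provider", "<provider class>");
+ *   // ...plus the keys required by the ServiceConfig base class
+ *   DIRConfig cfg = new DIRConfig(p);
+ */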
+
+ public String getDbDir() {
+ return dbDir;
+ }
+
+ public String getAuthenticationProvider() {
+ return authenticationProvider;
+ }
+
+}
diff --git a/servers/src/org/xtreemfs/dir/DIRRequest.java b/servers/src/org/xtreemfs/dir/DIRRequest.java
new file mode 100644
index 0000000000000000000000000000000000000000..d6fd04b8fed96f4c5d09467f8ab4a8d7bd6d1579
--- /dev/null
+++ b/servers/src/org/xtreemfs/dir/DIRRequest.java
@@ -0,0 +1,50 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.dir;
+
+import java.net.InetSocketAddress;
+
+import org.xtreemfs.common.Request;
+import org.xtreemfs.foundation.pinky.PinkyRequest;
+import org.xtreemfs.foundation.speedy.SpeedyRequest;
+
+public class DIRRequest extends Request {
+
+ public SpeedyRequest sr;
+
+ public InetSocketAddress srEndpoint;
+
+ public RequestDetails details;
+
+ public DIRRequest() {
+ this(null);
+ }
+
+ public DIRRequest(PinkyRequest pr) {
+ super(pr);
+ sr = null;
+ details = new RequestDetails();
+ }
+}
diff --git a/servers/src/org/xtreemfs/dir/DIRRequestListener.java b/servers/src/org/xtreemfs/dir/DIRRequestListener.java
new file mode 100644
index 0000000000000000000000000000000000000000..f94218f11031c3aa85c19d431159fca32e5a2daf
--- /dev/null
+++ b/servers/src/org/xtreemfs/dir/DIRRequestListener.java
@@ -0,0 +1,32 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+*/
+/*
+ * AUTHORS: Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.dir;
+
+
+public interface DIRRequestListener {
+
+ public void dsRequestDone(DIRRequest request);
+
+}
diff --git a/servers/src/org/xtreemfs/dir/DirService.java b/servers/src/org/xtreemfs/dir/DirService.java
new file mode 100644
index 0000000000000000000000000000000000000000..53550a60e4074ef075ec484c71f4453e1a5d9349
--- /dev/null
+++ b/servers/src/org/xtreemfs/dir/DirService.java
@@ -0,0 +1,820 @@
+/* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
+
+ This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
+ Grid Operating System, see for more details.
+ The XtreemOS project has been developed with the financial support of the
+ European Commission's IST program under contract #FP6-033576.
+
+ XtreemFS is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 2 of the License, or (at your option)
+ any later version.
+
+ XtreemFS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with XtreemFS. If not, see .
+ */
+/*
+ * AUTHORS: Jan Stender (ZIB)
+ */
+
+package org.xtreemfs.dir;
+
+import java.io.File;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+import org.xtreemfs.common.logging.Logging;
+import org.xtreemfs.foundation.json.JSONException;
+import org.xtreemfs.foundation.json.JSONParser;
+import org.xtreemfs.foundation.json.JSONString;
+
+/**
+ * Implements the functionality provided by the Directory Service.
+ *
+ * @author stender
+ *
+ */
+public class DirService {
+
+ public enum Attrs {
+ uuid, version, lastUpdated, owner, type, name, organization, country, uri, publicKey
+ }
+
+ public static final String TABLE_NAME = "TABLE";
+
+ public static final String COL_UUID = "UUID";
+
+ public static final String COL_ATTR = "ATTR";
+
+ public static final String COL_VAL = "VAL";
+
+ public static final String COL_MAPPING = "MAPPING";
+
+ public static final String COL_OWNER = "OWNER";
+
+ public static final String COL_VERSION = "VERSION";
+
+ private DIRRequestListener requestListener;
+
+ private final Connection conEntities;
+
+ private final Connection conMappings;
+
+ private Map<String, Long> timestamps;
+
+ /**
+ * Creates a new Directory Service using a database stored at the given path
+ * in the local file system tree.
+ *
+ * @param dbPath
+ * the path to the database directory
+ * @throws SQLException
+ * if the database could not be initialized properly
+ */
+ public DirService(String dbPath) throws SQLException {
+
+ this.timestamps = new HashMap<String, Long>();
+
+ try {
+ Class.forName("org.hsqldb.jdbcDriver");
+ } catch (Exception e) {
+ Logging.logMessage(Logging.LEVEL_ERROR, this,
+ "ERROR: failed to load HSQLDB JDBC driver.");
+ throw new RuntimeException(e);
+ }
+
+ new File(dbPath).mkdirs();
+
+ Properties info = new Properties();
+ info.setProperty("shutdown", "true");
+ info.setProperty("user", "sa");
+ conEntities = DriverManager.getConnection("jdbc:hsqldb:file:" + dbPath + "/ds-entities",
+ info);
+ conEntities.setAutoCommit(true);
+
+ conMappings = DriverManager.getConnection("jdbc:hsqldb:file:" + dbPath + "/ds-mappings",
+ info);
+ conMappings.setAutoCommit(true);
+
+ // check whether the entities table exists already
+ // if the table does not exist yet, create it
+ if (!tableExists(conEntities)) {
+ String sql = "CREATE TABLE " + TABLE_NAME + " (" + COL_UUID + " VARCHAR(128) NOT NULL,"
+ + COL_ATTR + " VARCHAR(128) NOT NULL," + COL_VAL + " VARCHAR(1024) NOT NULL);";
+
+ Statement statement = conEntities.createStatement();
+ statement.execute(sql);
+ statement.close();
+ }
+
+ // check whether the mappings table exists already
+ // if the table does not exist yet, create it
+ if (!tableExists(conMappings)) {
+ String sql = "CREATE TABLE " + TABLE_NAME + " (" + COL_UUID + " VARCHAR(128) NOT NULL,"
+ + COL_OWNER + " VARCHAR(128) NOT NULL," + COL_MAPPING + " VARCHAR(1024) NOT NULL,"
+ + COL_VERSION + " INTEGER NOT NULL);";
+
+ Statement statement = conMappings.createStatement();
+ statement.execute(sql);
+ statement.close();
+ }
+
+ }
+
+ public void shutdown() throws SQLException {
+ conMappings.createStatement().execute("shutdown");
+ conMappings.close();
+ conEntities.createStatement().execute("shutdown");
+ conEntities.close();
+ }
+
+ /**
+ * Registers or updates an entity at the Directory Service.
+ *
+ *
+ * First, an authorization check is performed. Access is always granted if
+ * no entity with the given UUID exists yet. If an entity already exists,
+ * access is only granted if the user ID associated with the request is
+ * equal to the user ID associated with the existing entity.
+ *
+ *
+ * If the request is sufficiently authorized and the entity exists already,
+ * oldVersion is compared to the version which is currently
+ * associated with the entity. Unless both version strings are equal,
+ * registration fails with an error message indicating that an attempt was
+ * made to update an entry with an outdated version.
+ *
+ *
+ * If authorization and version check are successful, all entries given in
+ * data are atomically updated. This includes a calculation of a
+ * new version string, as well as an update of the 'lastUpdated' attribute.
+ *
+ * @param request
+ * the request context
+ * @param uuid
+ * the UUID of the entity
+ * @param data
+ * a map containing attribute-value pairs defining the entity
+ * @param oldVersion
+ * the former version number of the entry, which the update
+ * refers to
+ * @throws SQLException
+ * if an error occurred while updating the database
+ * @throws UserException
+ * if the operation failed due to an invalid argument
+ */
+ public void registerEntity(DIRRequest request, String uuid, Map<String, Object> data,
+ long oldVersion) throws SQLException, UserException {
+
+ Statement statement = conEntities.createStatement();
+
+ try {
+
+ conEntities.setAutoCommit(false);
+
+ // check if an owner has already been defined for the entry;
+ // if so, check if the user requesting the update is authorized to
+ // modify the entry
+ boolean ownerExists = false;
+
+ String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_VAL },
+ COL_UUID + "='" + uuid + "' and " + COL_ATTR + "='" + Attrs.owner + "'");
+
+ ResultSet rs = statement.executeQuery(sql);
+ try {
+ if (rs.next()) {
+ ownerExists = true;
+ String owner = rs.getString(1);
+ checkAuthorization(owner, request, uuid);
+ }
+ } finally {
+ rs.close();
+ }
+
+ // check if the user has the correct version to update
+ if (ownerExists && !data.isEmpty()) {
+ sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_VAL },
+ COL_UUID + "='" + uuid + "' and " + COL_ATTR + "='" + Attrs.version + "'");
+ rs = statement.executeQuery(sql);
+
+ try {
+ if (rs.next()) {
+
+ // check the version
+ long versionInDB = rs.getLong(1);
+ if (versionInDB != oldVersion)
+ throw new UserException(
+ "version mismatch: received update for version '" + oldVersion
+ + "', latest version is '" + versionInDB + "'");
+ }
+ } finally {
+ rs.close();
+ }
+ }
+
+ long timestamp = System.currentTimeMillis() / 1000;
+
+ // add the owner entry if it does not exist
+ if (!ownerExists) {
+ // insert the new attribute-value pair
+ sql = SQLQueryHelper.createInsertStatement(TABLE_NAME, null, new Object[] { uuid,
+ Attrs.owner.toString(), request.details.userId });
+ statement.executeUpdate(sql);
+ }
+
+ for (String attr : data.keySet()) {
+
+ if (attr.equals(Attrs.version.toString())
+ || attr.equals(Attrs.lastUpdated.toString()))
+ throw new UserException("invalid attribute name: '" + attr
+ + "' cannot be changed explicitly");
+
+ // delete the former attribute-value pair, if existing
+ sql = SQLQueryHelper.createDeleteStatement(TABLE_NAME, COL_UUID + "='" + uuid
+ + "' and " + COL_ATTR + "='" + attr + "'");
+ statement.executeUpdate(sql);
+
+ // if a value has been assigned, insert the new attribute-value
+ // pair
+ String value = (String) data.get(attr);
+
+ if (value != null && value.length() > 0) {
+
+ sql = SQLQueryHelper.createInsertStatement(TABLE_NAME, null, new Object[] {
+ uuid, attr, value });
+ statement.executeUpdate(sql);
+ }
+
+ }
+
+ // calculate the new version number
+ long version = oldVersion + 1;
+
+ // delete the former version number
+ sql = SQLQueryHelper.createDeleteStatement(TABLE_NAME, COL_UUID + "='" + uuid
+ + "' and " + COL_ATTR + "='" + Attrs.version + "'");
+ statement.executeUpdate(sql);
+
+ // update the version number
+ sql = SQLQueryHelper.createInsertStatement(TABLE_NAME, new String[] { COL_UUID,
+ COL_ATTR, COL_VAL }, new Object[] { uuid, Attrs.version.toString(), version });
+ statement.executeUpdate(sql);
+
+ // update the timestamp
+ timestamps.put(uuid, timestamp);
+
+ // commit the transaction
+ conEntities.commit();
+
+ MessageUtils.marshallResponse(request, version);
+ this.notifyRequestListener(request);
+
+ } catch (UserException exc) {
+ conEntities.rollback();
+ throw exc;
+ } catch (SQLException exc) {
+ conEntities.rollback();
+ throw exc;
+ } finally {
+ statement.close();
+ }
+
+ }
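+ /*
+ * Illustrative call sequence (not part of the original patch; assumes a
+ * DirService instance "dirService" and a prepared DIRRequest "request",
+ * attribute values are made up). An initial registration passes version 0;
+ * each later update must pass the version reported in the previous
+ * response, otherwise a "version mismatch" UserException is thrown:
+ *
+ *   Map<String, Object> attrs = new HashMap<String, Object>();
+ *   attrs.put("type", "OSD");
+ *   attrs.put("name", "osd1");
+ *   dirService.registerEntity(request, "osd1-uuid", attrs, 0); // creates version 1
+ *   attrs.put("country", "DE");
+ *   dirService.registerEntity(request, "osd1-uuid", attrs, 1); // creates version 2
+ */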
+
+ /**
+ * Queries the Directory Service.
+ *
+ *
+ * The method returns a set of entities in the form of a mapping from UUIDs
+ * to maps containing sets of attribute-value pairs associated with the
+ * corresponding UUIDs.
+ *
+ *
+ * The set of entities included in the result set is restricted by the query
+ * map. Only entities that match the query map are included, i.e. entities
+ * whose attribute-value pairs are equal to or at least covered by the
+ * patterns contained in the query map. An entity is only included in the
+ * result set if each attribute from the query map is also attached to it.
+ * Similarly, each value mapped by an attribute in the query map must also
+ * be mapped by an attribute attached to the entity, with the exception that
+ * an asterisk ('*') indicates that any value is allowed.
+ *
+ *
+ * The attributes included in the result set are restricted by the given
+ * list of attributes. If this list is null or empty, all
+ * attributes of all matching entities are included.
+ *
+ * @param request
+ * the request context
+ * @param queryMap
+ * a mapping defining the query
+ * @param attrs
+ * a set of attributes to which all entities included in the
+ * result set are reduced
+ *
+ * @throws SQLException
+ * if an error occurred while querying the database
+ */
+ public void getEntities(DIRRequest request, Map<String, Object> queryMap, List<String> attrs)
+ throws SQLException {
+
+ // TODO: check whether some fancy SQL statement will perform this task
+ // more efficiently
+
+ Statement statement = conEntities.createStatement();
+
+ try {
+
+ // first, get a list of potential UUIDs which might belong to the query
+ // result; most probably, this will significantly reduce the amount of
+ // entries to be checked in the second step
+ StringBuffer sb = new StringBuffer();
+ for (String key : queryMap.keySet()) {
+
+ String value = (String) queryMap.get(key);
+
+ if (sb.length() != 0)
+ sb.append("OR ");
+
+ if (key.equalsIgnoreCase(Attrs.uuid.toString()) && !value.equals("*")) {
+ sb.append("(");
+ sb.append(COL_UUID);
+ sb.append("='");
+ sb.append(value);
+ sb.append("')");
+ continue;
+ }
+
+ if (!key.equalsIgnoreCase(Attrs.lastUpdated.toString())) {
+
+ sb.append("(");
+ sb.append(COL_ATTR);
+ sb.append("='");
+ sb.append(key);
+ sb.append("'");
+
+ if (!value.equals("*")) {
+ sb.append(" AND ");
+ sb.append(COL_VAL);
+ sb.append("='");
+ sb.append(value);
+ sb.append("'");
+ }
+
+ sb.append(")");
+ }
+
+ }
+
+ String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_UUID },
+ sb.toString());
+ ResultSet rs = statement.executeQuery(sql);
+
+ Set<String> uuids = new HashSet<String>();
+ try {
+ while (rs.next())
+ uuids.add(rs.getString(1));
+ } finally {
+ rs.close();
+ }
+
+ // for each potential entry, check whether all requirements are
+ // fulfilled; if so, add the entry to the result set
+ Map<String, Map<String, String>> result = new HashMap<String, Map<String, String>>();
+ for (String uuid : uuids) {
+
+ // get all entities with the given UUID from the database
+ sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_ATTR,
+ COL_VAL }, COL_UUID + "='" + uuid + "'");
+ rs = statement.executeQuery(sql);
+
+ // add all entries from the database
+ Map<String, String> entity = new HashMap<String, String>();
+ entity.put(Attrs.uuid.toString(), uuid);
+ try {
+ while (rs.next())
+ entity.put(rs.getString(1), rs.getString(2));
+ } finally {
+ rs.close();
+ }
+
+ // add the lastUpdated entry from the timestamp map if it exists
+ if (timestamps.containsKey(uuid))
+ entity.put(Attrs.lastUpdated.toString(), timestamps.get(uuid).toString());
+
+ // if the entry matches the query map, remove all attribute-value
+ // pairs not defined in 'attrs' and add the resulting entry to the
+ // result set
+ if (matches(entity, queryMap)) {
+
+ if (attrs == null || attrs.size() == 0)
+ result.put(uuid, entity);
+
+ else {
+
+ // prune the result set with the aid of 'attrs'
+
+ Map<String, String> prunedEntry = new HashMap<String, String>();
+ for (String key : attrs) {
+ String value = (String) entity.get(key);
+ if (value != null)
+ prunedEntry.put(key, value);
+ }
+
+ result.put(uuid, prunedEntry);
+ }
+ }
+ }
+
+ MessageUtils.marshallResponse(request, result);
+ this.notifyRequestListener(request);
+
+ } finally {
+ statement.close();
+ }
+ }
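+ /*
+ * Illustrative query (not part of the original patch; assumes the same
+ * "dirService" and "request" as above): return the 'name' and 'uri'
+ * attributes of every registered entity of type OSD, whatever its name:
+ *
+ *   Map<String, Object> query = new HashMap<String, Object>();
+ *   query.put("type", "OSD");
+ *   query.put("name", "*");
+ *   dirService.getEntities(request, query, Arrays.asList("name", "uri"));
+ */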
+
+ /**
+ * Deregisters an entity from the Directory Service.
+ *
+ *
+ * If an entity with the given UUID exists, a check is performed whether the
+ * user ID associated with the request is equal to the user ID associated
+ * with the database entry. The deregistration will only be performed if
+ * both user IDs match.
+ *
+ * @param request
+ * the request context
+ * @param uuid
+ * the UUID of the entity to remove
+ * @throws SQLException
+ * if an error occurred while updating the database
+ * @throws UserException
+ * if the operation failed due to an invalid argument
+ */
+ public void deregisterEntity(DIRRequest request, String uuid) throws SQLException, UserException {
+
+ conEntities.setAutoCommit(true);
+ Statement statement = conEntities.createStatement();
+
+ // check if an owner has already been defined for the entry;
+ // if so, check if the user requesting the deletion is the owner
+ String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_VAL },
+ COL_UUID + "='" + uuid + "' and " + COL_ATTR + "='" + Attrs.owner + "'");
+ ResultSet rs = statement.executeQuery(sql);
+
+ try {
+
+ if (rs.next()) {
+ String owner = rs.getString(1);
+ checkAuthorization(owner, request, uuid);
+ }
+
+ // delete all attribute-value pairs associated with the UUID from
+ // the database
+ sql = SQLQueryHelper.createDeleteStatement(TABLE_NAME, COL_UUID + "='" + uuid + "'");
+ statement.executeUpdate(sql);
+
+ // remove last timestamp from the hash map
+ timestamps.remove(uuid);
+
+ MessageUtils.marshallResponse(request, null);
+ this.notifyRequestListener(request);
+
+ } finally {
+ rs.close();
+ statement.close();
+ }
+ }
+
+ public void registerAddressMapping(DIRRequest request, String uuid,
+ List<Map<String, Object>> mapping, long oldVersion) throws SQLException, UserException {
+
+ Statement statement = conMappings.createStatement();
+
+ try {
+
+ conMappings.setAutoCommit(false);
+
+ // First, check whether a mapping has been registered already. If
+ // so, check whether the requesting user is authorized to change the
+ // mapping.
+ String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_OWNER,
+ COL_VERSION }, COL_UUID + "='" + uuid + "'");
+
+ ResultSet rs = statement.executeQuery(sql);
+ long versionInDB = 0;
+ String owner = null;
+ try {
+ if (rs.next()) {
+
+ // check whether user is authorized
+ owner = rs.getString(1);
+ checkAuthorization(owner, request, uuid);
+
+ // check whether version is correct
+ versionInDB = rs.getLong(2);
+ if (versionInDB != oldVersion)
+ throw new UserException("version mismatch: received update for version '"
+ + oldVersion + "', latest version is '" + versionInDB + "'");
+ }
+ } finally {
+ rs.close();
+ }
+
+ // do not change the owner if the entry exists already
+ if(owner == null)
+ owner = request.details.userId;
+
+ // delete the old mapping, if necessary
+ sql = SQLQueryHelper.createDeleteStatement(TABLE_NAME, COL_UUID + "='" + uuid + "'");
+ statement.executeUpdate(sql);
+
+ // add the new mapping
+ sql = SQLQueryHelper.createInsertStatement(TABLE_NAME, new String[] { COL_UUID,
+ COL_OWNER, COL_MAPPING, COL_VERSION }, new Object[] { uuid, owner,
+ JSONParser.writeJSON(mapping), oldVersion + 1 });
+ statement.executeUpdate(sql);
+
+ // commit the transaction
+ conMappings.commit();
+
+ MessageUtils.marshallResponse(request, oldVersion + 1);
+ this.notifyRequestListener(request);
+
+ } catch (UserException exc) {
+ conMappings.rollback();
+ throw exc;
+ } catch (SQLException exc) {
+ conMappings.rollback();
+ throw exc;
+ } catch (JSONException exc) {
+ conMappings.rollback();
+ throw new UserException("cannot convert map to JSON: " + exc);
+ } finally {
+ statement.close();
+ }
+ }
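+ /*
+ * Illustrative mapping record (not part of the original patch; values are
+ * made up): each element of the 'mapping' list is a map with the keys that
+ * UUIDResolver.fetchUUID expects, e.g.
+ *
+ *   { "protocol": "http", "address": "192.168.1.10", "port": 32640,
+ *     "ttl": 3600, "match_network": "*" }
+ */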
+
+ public void getAddressMapping(DIRRequest request, String uuid) throws SQLException,
+ JSONException {
+
+ Statement statement = conMappings.createStatement();
+ Map<String, List<Object>> results = new HashMap<String, List<Object>>();
+
+ // get all entries
+ if (uuid.equals("")) {
+
+ // query all mappings
+ String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_UUID,
+ COL_VERSION, COL_MAPPING }, null);
+
+ ResultSet rs = statement.executeQuery(sql);
+ try {
+ while (rs.next()) {
+ List<Object> result = new ArrayList<Object>(2);
+ result.add(rs.getLong(2)); // version
+ result.add(JSONParser.parseJSON(new JSONString(rs.getString(3)))); // mapping
+
+ results.put(rs.getString(1), result);
+ }
+ } finally {
+ rs.close();
+ }
+
+ } else {
+
+ // query the mapping with the given UUID
+ String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_UUID,
+ COL_VERSION, COL_MAPPING }, COL_UUID + "='" + uuid + "'");
+ ResultSet rs = statement.executeQuery(sql);
+
+ try {
+ if (rs.next()) {
+ List<Object> result = new ArrayList<Object>(2);
+ result.add(rs.getLong(2)); // version
+ result.add(JSONParser.parseJSON(new JSONString(rs.getString(3)))); // mapping
+
+ results.put(rs.getString(1), result);
+ }
+ } finally {
+ rs.close();
+ }
+ }
+
+ MessageUtils.marshallResponse(request, results);
+ this.notifyRequestListener(request);
+ }
+
+ public void deregisterAddressMapping(DIRRequest request, String uuid) throws SQLException,
+ UserException {
+
+ conMappings.setAutoCommit(true);
+
+ // First, check whether a mapping has been registered already. If
+ // so, check whether the requesting user is authorized to change the
+ // mapping.
+ Statement statement = conMappings.createStatement();
+ String sql = SQLQueryHelper.createQueryStatement(TABLE_NAME, new String[] { COL_OWNER },
+ COL_UUID + "='" + uuid + "'");
+ ResultSet rs = statement.executeQuery(sql);
+
+ try {
+ if (rs.next()) {
+ String owner = rs.getString(1);
+ checkAuthorization(owner, request, uuid);
+ }
+
+ // delete the mapping
+ sql = SQLQueryHelper.createDeleteStatement(TABLE_NAME, COL_UUID + "='" + uuid + "'");
+ statement.executeUpdate(sql);
+
+ MessageUtils.marshallResponse(request, null);
+ this.notifyRequestListener(request);
+
+ } finally {
+ rs.close();
+ statement.close();
+ }
+ }
+
+ /**
+ * Returns the current system time in milliseconds since 1/1/70
+ */
+ public void getGlobalTime(DIRRequest request) {
+ MessageUtils.marshallResponse(request, System.currentTimeMillis());
+ this.notifyRequestListener(request);
+ }
+
+ public void setRequestListener(DIRRequestListener listener) {
+ requestListener = listener;
+ }
+
+ protected void notifyRequestListener(DIRRequest request) {
+ if (requestListener != null)
+ requestListener.dsRequestDone(request);
+ else
+ throw new RuntimeException("listener must not be null!");
+ }
+
+ protected Map<String, Map<String, String>> getEntityDBDump() throws SQLException {
+
+ Statement statement = conEntities.createStatement();
+ String sql = "SELECT * FROM " + TABLE_NAME;
+ ResultSet rs = statement.executeQuery(sql);
+
+ Map<String, Map<String, String>> dump = new HashMap<String, Map<String, String>>();
+
+ for (int i = 0; rs.next(); i++) {
+ String uuid = rs.getString(1);
+ Map<String, String> entry = dump.get(uuid);
+ if (entry == null) {
+ entry = new HashMap<String, String>();
+ dump.put(uuid, entry);
+ }
+
+ entry.put(rs.getString(2), rs.getString(3));
+
+ if (!entry.containsKey(Attrs.lastUpdated.toString())) {
+ Object timeStamp = timestamps.get(uuid);
+ if (timeStamp != null)
+ entry.put(Attrs.lastUpdated.toString(), timeStamp.toString());
+ }
+ }
+
+ rs.close();
+
+ return dump;
+ }
+
+ protected Map<String, Object[]> getMappingDBDump() throws SQLException, JSONException {
+
+ Statement statement = conMappings.createStatement();
+ String sql = "SELECT * FROM " + TABLE_NAME;
+ ResultSet rs = statement.executeQuery(sql);
+
+ Map<String, Object[]> dump = new HashMap<String, Object[]>();
+
+ for (int i = 0; rs.next(); i++) {
+ String uuid = rs.getString(1);
+ dump.put(uuid, new Object[] { rs.getString(2),
+ JSONParser.parseJSON(new JSONString(rs.getString(3))), rs.getLong(4) + "" });
+ }
+
+ rs.close();
+
+ return dump;
+ }
+
+ /**
+ * Checks whether a given entity matches a given query map.
+ *
+ * @param entity
+ * @param query
+ * @return
+ */
+ private boolean matches(Map