diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..7fbf853b8 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,94 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +### Added + +- [Ensure Database connection pool tests connections on borrow and while idle][STOR-1333] + +### Fixed + +- [If database is restarted while storm is running, it doesn't reconnect and needs a restart][STOR-482] + +## 1.11.21 - 2021-05-12 + +### Fixed + +- [StoRM Backend service enters failed state when stopped][STOR-1395] +- [Upgrading to StoRM v1.11.20 could break connections with MariaDB][STOR-1397] +- [Ensure MariaDB is started before StoRM Backend on boot][STOR-1401] + +## 1.11.20 - 2021-04-01 + +### Added + +- [StoRM Backend and native libs should run with Java 11][STOR-1357] + +## 1.11.19 - 2020-10-29 + +### Added + +- [Include Jetty handler metrics reporting in storm-backend-metrics log][STOR-1251] + +### Fixed + +- [BoL and PtG requests statuses not updated after recall success][STOR-1260] + +- [Uncaught RuntimeException raised when user.storm.pinned attribute is not found causes SRM_INTERNAL_ERROR during srmReleaseFiles][STOR-1267] + +## 1.11.18 - 2020-08-07 + +### Added + +- [Make Background DU configurable to run periodically in order to update used space info on db][STOR-932] +- [Fix useless verbosity in log][STOR-1036] +- [Include thread pool and jetty handler metrics reporting in storm-backend-metrics log][STOR-1174] +- [Add Date to Backend's metrics log][STOR-1198] +- [SystemD support for StoRM Backend][STOR-1089] + +### Fixed + +- [Log as ERROR only internal errors][STOR-892] +- [Understand what is the purpose of the recallBuckets map and whether it can be removed][STOR-1175] +- [SrmRm file does not exist should not be logged as 
ERROR][STOR-1176] +- [Include mysql-connector-java into maven dependencies][STOR-1216] + +## 1.11.17 - 2019-12-17 + +### Fixed + +- [Service storm-backend-server status returns 0 even if backend is not running][STOR-821] + +## 1.11.16 - 2019-10-02 + +### Fixed + +- [Improve error description when srmMkdir path contains non existing intermediate directories][STOR-1099] + + + +[STOR-482]: https://issues.infn.it/jira/browse/STOR-482 +[STOR-821]: https://issues.infn.it/jira/browse/STOR-821 +[STOR-892]: https://issues.infn.it/jira/browse/STOR-892 +[STOR-932]: https://issues.infn.it/jira/browse/STOR-932 +[STOR-1036]: https://issues.infn.it/jira/browse/STOR-1036 +[STOR-1089]: https://issues.infn.it/jira/browse/STOR-1089 +[STOR-1099]: https://issues.infn.it/jira/browse/STOR-1099 +[STOR-1174]: https://issues.infn.it/jira/browse/STOR-1174 +[STOR-1175]: https://issues.infn.it/jira/browse/STOR-1175 +[STOR-1176]: https://issues.infn.it/jira/browse/STOR-1176 +[STOR-1198]: https://issues.infn.it/jira/browse/STOR-1198 +[STOR-1216]: https://issues.infn.it/jira/browse/STOR-1216 +[STOR-1251]: https://issues.infn.it/jira/browse/STOR-1251 +[STOR-1260]: https://issues.infn.it/jira/browse/STOR-1260 +[STOR-1267]: https://issues.infn.it/jira/browse/STOR-1267 +[STOR-1333]: https://issues.infn.it/jira/browse/STOR-1333 +[STOR-1357]: https://issues.infn.it/jira/browse/STOR-1357 +[STOR-1395]: https://issues.infn.it/jira/browse/STOR-1395 +[STOR-1397]: https://issues.infn.it/jira/browse/STOR-1397 +[STOR-1401]: https://issues.infn.it/jira/browse/STOR-1401 + diff --git a/Jenkinsfile b/Jenkinsfile index 312c9840e..470f699d7 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -8,7 +8,7 @@ pipeline { label "${kubeLabel}" cloud 'Kube mwdevel' defaultContainer 'jnlp' - inheritFrom 'ci-template' + inheritFrom 'ci-template-java11' } } diff --git a/etc/storm.properties.template b/etc/storm.properties.template index 43bde6974..28ace2f50 100644 --- a/etc/storm.properties.template +++ 
b/etc/storm.properties.template @@ -1,252 +1,62 @@ -########################### -# storm.properties -########################### +# ==================================================================================== +# StoRM Backend Configuration +# ==================================================================================== # -# This file contains properties that govern the operation of StoRM. -# The file is read at startup of the service. -# Your changes will be applied when the service will be restarted. -# -############################ - -############################ -### SERVICE PARAMETERS ### -############################ - -# ============================ -# StoRM Service DNS -# ============================ -# hostname with which the service is published -storm.service.FE-public.hostname = +# This file contains StoRM Backend configuration. The file is read at startup +# of the service. Your changes will be applied when the service will be restarted. - -# ============================ -# Front End service port # ============================ -# The Front End binding port -storm.service.port = 8444 - +# SRM service parameters # ============================ -# Managed SURL endpoints -# ============================ -# -# comma-separated list of managed SURL. -# These entries are used to check the SURL validity. -# storm.service.SURL.endpoint = srm://:/ -# Example: Example: srm://storm.cnaf.infn.it:8444/srm/managerv2 -storm.service.SURL.endpoint = -# ============================ -# Managed SURL default ports -# ============================ -# -# comma-separated list of managed SURL's defauilt ports. -# These entries are used to check the SURL validity. -# storm.service.SURL.default-ports = -# Example: 8444 -storm.service.SURL.default-ports = +# List of accepted StoRM SRM end-points. 
+# srm_endpoints.1.host = storm.example +# srm_endpoints.1.port = 8444 +# srm_endpoints.2.host = alias.example +# srm_endpoints.2.port = 8444 # ============================ -# FE/BE communication RDBMS +# FE/BE DBMS parameters # ============================ -# -# Parameters to connect to the DB used as channel for the requests. -storm.service.request-db.host = -storm.service.request-db.username = -storm.service.request-db.passwd = - -############################################# -############ PROFILE PARAMETERS ############ -############################################# -# ============================ -# StoRM Service Generic Behavior -# ============================ -directory.automatic-creation = false -# To enable file creation within directories. Useful when authorization is not defined. -directory.writeperm = false +# Database connection configuration +db.hostname = +db.username = storm +db.password = storm +db.port = 3306 +db.properties = serverTimezone=UTC&autoReconnect=true +# Database connection pool configuration +db.pool.size = -1 +db.pool.min_idle = 10 +db.pool.max_wait_millis = 5000 +db.pool.test_on_borrow = true +db.pool.test_while_idle = true # ============================ -# StoRM Service PINNED Behavior -# ============================ -# Default PinLifetime in seconds used for pinning files in case of srmPrepareToPut or srmPrapareToGet operation -# without any pinLifetime specified. -pinLifetime.default=259200 -# Maximum allowed value for Pin LifeTime. -# Values beyond the max will be dropped to max value. -pinLifetime.maximum=1814400 - - -# ============================ -# StoRM Service TURL Behavior -# ============================ -extraslashes.file= -extraslashes.rfio= -extraslashes.gsiftp=/ -extraslashes.root= - - -# ======================= -# Default -# ======================= -# Default FileLifetime in seconds used for VOLATILE file in case of SRM request without FileLifetime parameter specified. 
-fileLifetime.default=259200 -# Possible values are : N (Never) and A (Always) -default.overwrite = A -# Possible values are V (Volatile), P (Permanent) and D (Durable) -default.storagetype = P - - -############################################# -############ TUNING PARAMETERS ############# -############################################# - -# ============================ -# BE-private RDBMS -# ============================ -persistence.internal-db.connection-pool.maxActive = 50 -persistence.internal-db.connection-pool.maxWait = 50 - - -# ============================ -# ASYNCH SCHEDULER Component parameters -# ============================ -scheduler.serial=false -scheduler.crusher.workerCorePoolSize=10 -scheduler.crusher.workerMaxPoolSize=50 -scheduler.crusher.queueSize=2000 -scheduler.chunksched.ptp.workerCorePoolSize=50 -scheduler.chunksched.ptp.workerMaxPoolSize=200 -scheduler.chunksched.ptp.queueSize=1000 -scheduler.chunksched.ptg.workerCorePoolSize=50 -scheduler.chunksched.ptg.workerMaxPoolSize=200 -scheduler.chunksched.ptg.queueSize=2000 -scheduler.chunksched.bol.workerCorePoolSize=50 -scheduler.chunksched.bol.workerMaxPoolSize=200 -scheduler.chunksched.bol.queueSize=2000 -scheduler.chunksched.copy.workerCorePoolSize=10 -scheduler.chunksched.copy.workerMaxPoolSize=50 -scheduler.chunksched.copy.queueSize=500 - - -# ============================ -# ASYNCH PICKER Component parameters +# REST Services parameter # ============================ -asynch.db.ReconnectPeriod=18000 -asynch.db.DelayPeriod=30 -asynch.PickingInitialDelay=1 -# Polling time in seconds for pick up new requests from DB -asynch.PickingTimeInterval=2 -asynch.PickingMaxBatchSize=100 - - -# ====================================== -# SYNCH CALL Component tuning parameters -# ====================================== -synchcall.directoryManager.maxLsEntry=2000 +rest.port = 9998 +rest.max_threads = 100 +rest.max_queue_size = 1000 # ============================ -# REST Services parameter +# Sanity Check enabled 
# ============================ -storm.rest.services.port=9998 -storm.rest.services.maxthreads=100 -storm.rest.services.max_queue_size=1000 +sanity_checks_enabled = true # ============================ # XMLRPC Server parameter # ============================ -synchcall.xmlrpc.unsecureServerPort=8080 -synchcall.xmlrpc.maxthread=256 -synchcall.xmlrpc.max_queue_size=1000 -synchcall.xmlrpc.security.enabled=true -synchcall.xmlrpc.security.token= - -# ======================= -# Pinned Files cleaning parameters -# ======================= -# Initial delay in seconds before starting the garbage collector thread -gc.pinnedfiles.cleaning.delay = 10 -# Garbage Collector time interval in seconds. -gc.pinnedfiles.cleaning.interval = 300 - - -# =============================== -# TAPE RECALL Component parameter -# =============================== -tape.recalltable.service.param.retry-value=retry-value -tape.recalltable.service.param.status=status -tape.recalltable.service.param.takeover=first - +xmlrpc.port = 8080 +xmlrpc.max_threads = 256 +xmlrpc.max_queue_size = 1000 # ============================ -# srmCopy parameters +# XMLRPC & REST security token # ============================ -asynch.srmclient.retrytime=60 -asynch.srmclient.timeout=180 -asynch.srmclient.sleeptime=5 -asynch.srmclient.putdone.sleeptime=1 -asynch.srmclient.putdone.timeout=60 -asynch.gridftpclient=it.grid.storm.asynch.NaiveGridFTPTransferClient -asynch.srmclient=it.grid.storm.asynch.SRM22Client -# Default PinLifeTime in seconds used by StoRM in case of SrmCOpy operation. -# This value is the one specified in the remote SrmPrepareToGet request. -SRM22Client.PinLifeTime=259200 -# Time expressed in millisec. -asynch.srmcopy.gridftp.timeout = 15000 - - -# =========================== -# Garbage Collector parameter -# =========================== -# -#Enable/Disable Garbage Collector -#purging=true -# -#Time interval for between two requests in garbage collection. 
In seconds -purge.interval=600 -# -#Number of requests removed at each run. Every run purge max 800 requests in final status -purge.size=800 -# -#Time after that the GC consider a _terminated_ request as garbage -#Default: 21600s (6h) -expired.request.time=21600 - - -# ========================================================== -# Expired-Put-Requests-Agent parameters -# ========================================================== -# -# Transit expired put requests to a final state. A put request is expired if pinLifetime is reached. -# See pinLifetime.default. -# -#Time interval between two agent executions. In seconds -transit.interval = 300 -# -#Delay on starting agent. In seconds -transit.delay = 10 - - -# Skip ACL setup for PtG requests -ptg.skip-acl-setup = false - -# The caching policy for successful name lookups from the name service. -# The value is specified as integer to indicate the number of seconds to cache the successful lookup. -# A value of -1 indicates "cache forever". The default behavior is to cache forever when a security -# manager is installed, and to cache for an implementation specific period of time, when a security -# manager is not installed. -# -# Default value: 0 -# -#networkaddress.cache.ttl=0 - -# The caching policy for un-successful name lookups from the name service. -# The value is specified as integer to indicate the number of seconds to cache the failure for un-successful lookups. -# A value of 0 indicates "never cache". A value of -1 indicates "cache forever". -# -# Default value: 0 -# -#networkaddress.cache.negative.ttl=0 +security_enabled = true +security_token = secret-token # ================================ # Disk Usage Service Configuration @@ -258,10 +68,129 @@ ptg.skip-acl-setup = false # The initial starting delay of the service and the period can also be configured. 
# # Enable/Disable the periodic run of the service -storm.service.du.enabled=false +du.enabled = false # Enable/Disable parallel du execution -storm.service.du.parallelTasks=false +du.parallel_tasks_enabled = false # Initial delay of service start in seconds. Default: 60s -storm.service.du.delaySecs=60 +du.initial_delay = 60 # Interval of execution of the du in seconds. Default: 86400s (24h) -storm.service.du.periodSecs=86400 +du.tasks_interval = 86400 + + +# ================================ +# Advanced Properties +# ================================ + +# Enable/disable the automatic directory creation during srmPrepareToPut requests. +directories.enable_automatic_creation = false +# Enable/disable write permission on directory created through srmMkDir requests. +directories.enable_writeperm_on_creation = false + +# Default pinLifetime in seconds used for pinning files in case of srmPrepareToPut or srmPrepareToGet requests +pinlifetime.default = 259200 +# Maximum allowed value for pinLifeTime. Values beyond the max will be dropped to max value. +pinlifetime.maximum = 1814400 + +# Add extra slashes after the “authority” part of a TURL for FILE protocol. +extraslashes.file = +# Add extra slashes after the “authority” part of a TURL for RFIO protocol. +extraslashes.rfio = +# Add extra slashes after the “authority” part of a TURL for GSIFTP protocol. +extraslashes.gsiftp = / +# Add extra slashes after the “authority” part of a TURL for ROOT protocol. +extraslashes.root = / + +# Initial delay in seconds before starting the garbage collector thread +expired_spaces_agent.delay = 10 +# Garbage Collector time interval in seconds. +expired_spaces_agent.interval = 300 + +# Default FileSize +files.default_size = 1000000 +# Default FileLifetime in seconds used for VOLATILE file in case of SRM request. +files.default_lifetime = 259200 +# Default file overwrite mode to use upon srmPrepareToPut requests. +# Possible values are N (Never), A (Always), D (when files Differs). 
+files.default_overwrite = A +# Default File Storage Type to be used for srmPrepareToPut requests. +# Possible values are V (Volatile), P (Permanent) and D (Durable) +files.default_storagetype = P + +# Crusher Scheduler worker pool base size. +requests_scheduler.core_pool_size = 10 +# Crusher Scheduler worker pool max size. +requests_scheduler.max_pool_size = 50 +# Request queue maximum size. +requests_scheduler.queue_size = 2000 +# PrepareToPut worker pool base size. +ptp_scheduler.core_pool_size = 50 +# PrepareToPut worker pool max size. +ptp_scheduler.max_pool_size = 200 +# PrepareToPut request queue maximum size. +ptp_scheduler.queue_size = 1000 +# PrepareToGet worker pool base size. +ptg_scheduler.core_pool_size = 50 +# PrepareToGet worker pool max size. +ptg_scheduler.max_pool_size = 200 +# PrepareToGet request queue maximum size. +ptg_scheduler.queue_size = 2000 +# BringOnline worker pool base size. +bol_scheduler.core_pool_size = 50 +# BringOnline worker pool max size. +bol_scheduler.max_pool_size = 200 +# BringOnline request queue maximum size. +bol_scheduler.queue_size = 2000 + +# Initial delay before starting to pick requests from the DB, in seconds. +requests_picker_agent.delay = 1 +# Polling interval in seconds to pick up new SRM requests. +requests_picker_agent.interval = 2 +# Maximum number of requests picked up at each polling time. +requests_picker_agent.max_fetched_size = 100 + +# Maximum number of entries returned by an srmLs call. +# Since in case of recursive srmLs results can be in order of millions, +# this prevents a server overload. +synch_ls.max_entries = 2000 +# Default value for the parameter "allLevelRecursive" of the srmLS request. +synch_ls.default_all_level_recursive = false +# Default value for the parameter "numOfLevels" of the srmLS request. +synch_ls.default_num_levels = 1 +# Default value for the parameter "offset" of the LS request. 
+synch_ls.default_offset = 0 + +# Enable/Disable Garbage Collector +completed_requests_agent.enabled = true +# Time interval between two garbage collection runs. In seconds +completed_requests_agent.interval = 600 +# Initial delay before starting the requests garbage collection process, in seconds. +completed_requests_agent.delay = 10 +# Number of requests removed at each run. Every run purges max 800 requests in final status +completed_requests_agent.purge_size = 800 +# Time after which the GC considers a _terminated_ request as garbage, in seconds +completed_requests_agent.purge_age = 21600 + + +# Expired-Put-Requests-Agent transits expired put requests to a final state. +# A put request is expired if pinLifetime is reached. +# Time interval between two agent executions. In seconds +inprogress_requests_agent.interval = 300 +# Delay on starting agent. In seconds +inprogress_requests_agent.delay = 10 +# Time in seconds to consider an in-progress ptp request as expired. +inprogress_requests_agent.ptp_expiration_time = 2592000 + +# Skip ACL setup for PtG requests +skip_ptg_acl_setup = false + +info_quota_refresh_period = 900 +http_turl_prefix = +server_pool_status_check_timeout = 20000 +abort_maxloop = 10 +ping_properties_filename = ping-values.properties + +hearthbeat.bookkeeping_enabled = false +hearthbeat.performance_measuring_enabled = false +hearthbeat.period = 60 +hearthbeat.performance_logbook_time_interval = 15 +hearthbeat.performance_glance_time_interval = 15 \ No newline at end of file diff --git a/etc/systemd/service.d/storm-backend-server.conf b/etc/systemd/service.d/storm-backend-server.conf index da2317d59..484c8ed54 100644 --- a/etc/systemd/service.d/storm-backend-server.conf +++ b/etc/systemd/service.d/storm-backend-server.conf @@ -1,14 +1,17 @@ [Service] -# options for the JVM running the StoRM BE server -# Environment="STORM_BE_JMX_OPTS=-Dcom.sun.management.jmxremote.port=8501 -Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.authenticate=false" + # options for the JVM running the StoRM BE server Environment="STORM_BE_JVM_OPTS=-Xms512m -Xmx512m" -# Environment="STORM_BE_JVM_OPTS=-Xms512m -Xmx512m -Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=1044,suspend=n" +# Environment="STORM_BE_JVM_DEBUG_OPTS=-Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=1044,suspend=n" + # LCMAPS config file Environment="LCMAPS_DB_FILE=/etc/storm/backend-server/lcmaps.db" + # LCMAPS user mapping policy (defined in $LCMAPS_DB_FILE) Environment="LCMAPS_POLICY_NAME=standard" + # LCMAPS log file Environment="LCMAPS_LOG_FILE=/var/log/storm/lcmaps.log" + # LCMAPS log verbosity: 0 minimum (default), 5 maximum Environment="LCMAPS_DEBUG_LEVEL=0" diff --git a/etc/systemd/storm-backend-server.service b/etc/systemd/storm-backend-server.service index 1d7484d63..9c07dc0a5 100644 --- a/etc/systemd/storm-backend-server.service +++ b/etc/systemd/storm-backend-server.service @@ -9,7 +9,7 @@ Type=simple ExecStart=/bin/bash -ac "exec /usr/bin/java \ -server \ ${STORM_BE_JVM_OPTS} \ - ${STORM_BE_JMX_OPTS} \ + ${STORM_BE_JVM_DEBUG_OPTS} \ -cp '/usr/share/java/storm-backend-server/*:/etc/storm/backend-server' \ -Djava.library.path=/usr/lib64/ \ -Djna.library.path=/usr/lib64/modules:/usr/lib64/ \ diff --git a/pom.xml b/pom.xml index 9bf1ba0ec..8477f20a4 100644 --- a/pom.xml +++ b/pom.xml @@ -5,13 +5,14 @@ StoRM Backend server org.italiangrid.storm storm-backend-server - 1.11.21 + 1.12.0 1.10 2.25.1 + 2.9.0 8.1.9.v20130131 1.7.2 1.2.3 @@ -30,6 +31,7 @@ 3.1 1.1.1 1.7 + 4.4 1.5.1 2.7.1 1.4.6 @@ -37,14 +39,15 @@ 8.0.16 - 18.0 + 30.0-jre 3.1.0 2.12.1 + 1.6.6 - 1.10.19 + 4.3.1 1.3 - 1.0.6 + 1.0.7 UTF-8 @@ -55,17 +58,35 @@ + + + org.apache.maven.plugins + maven-enforcer-plugin + 3.0.0 + + + enforce-maven + + enforce + + + + + 3.1.1 + + + + + + + org.apache.maven.plugins maven-compiler-plugin - 3.6.1 + 3.8.1 - 1.8 - 1.8 - - -Xlint:deprecation - + 11 @@ -73,15 +94,12 @@ org.apache.maven.plugins 
maven-surefire-plugin - 2.20 - - false - + 3.0.0-M5 maven-assembly-plugin - 3.0.0 + 3.3.0 storm-backend-server false @@ -100,162 +118,20 @@ - - - - org.apache.maven.plugins - maven-dependency-plugin - 3.0.1 - - - copy-dependencies - deploy - - copy-dependencies - - - - - - - - - - org.codehaus.mojo - wagon-maven-plugin - 1.0-beta-4 - - - upload-config - deploy - - upload - - - ${remoteDeployment.url}/ - ${remoteDeployment.serverId} - etc - - - logging.xml, namespace.xml, path-authz.db, - init.d/*, logrotate.d/*, sysconfig/* - - /etc/storm/backend-server - - - - upload-logrotated - deploy - - upload - - - ${remoteDeployment.url} - ${remoteDeployment.serverId} - etc/logrotate.d - /etc/logrotate.d - - - - upload-systemd-unit - deploy - - upload - - - ${remoteDeployment.url} - ${remoteDeployment.serverId} - etc/systemd - /usr/lib/systemd/system - - - - upload-systemd-conf - deploy - - upload - - - ${remoteDeployment.url} - ${remoteDeployment.serverId} - etc/systemd/service.d - /usr/lib/systemd/system/storm-backend-server.service.d - - - - - upload-deps - deploy - - upload - - - ${remoteDeployment.url} - ${remoteDeployment.serverId} - target/dependency - * - /usr/share/java/storm-backend-server - - - - - upload-jar - deploy - - upload - - - ${remoteDeployment.url} - ${remoteDeployment.serverId} - target - storm-backend-server.jar - /usr/share/java/storm-backend-server - - - - - - - - - org.apache.maven.plugins - maven-deploy-plugin - 2.7 - - true - - - org.apache.maven.plugins maven-jar-plugin - 3.0.2 + 3.2.2 true + true - - - - - - org.apache.maven.wagon - wagon-ssh - 1.0 - - @@ -288,6 +164,12 @@ + + com.fasterxml.jackson.dataformat + jackson-dataformat-xml + ${jacksonVersion} + + com.google.guava guava @@ -338,6 +220,11 @@ jersey-bean-validation ${jerseyVersion} + + com.fasterxml.jackson.dataformat + jackson-dataformat-properties + ${jacksonVersion} + org.eclipse.jetty @@ -377,6 +264,12 @@ org.codehaus.jettison jettison ${jettisonVersion} + + + stax + stax-api + + @@ 
-385,6 +278,12 @@ ${commonsDbcpVersion} + + io.micrometer + micrometer-core + ${micrometerCoreVersion} + + commons-io commons-io @@ -406,6 +305,10 @@ javax.servlet servlet-api + + xml-apis + xml-apis + @@ -474,6 +377,12 @@ xalan ${xalanVersion} runtime + + + xml-apis + xml-apis + + @@ -483,12 +392,13 @@ runtime - + org.mockito - mockito-all + mockito-core ${mockitoVersion} test + org.hamcrest hamcrest-all @@ -512,8 +422,21 @@ xerces xercesImpl ${xercesImplVersion} + + + xml-apis + xml-apis + + + + + + org.apache.commons + commons-collections4 + ${commonsCollections4Version} + diff --git a/src/main/assemblies/assembly.xml b/src/main/assemblies/assembly.xml index 5d3f881b6..b249b1f01 100644 --- a/src/main/assemblies/assembly.xml +++ b/src/main/assemblies/assembly.xml @@ -94,7 +94,6 @@ etc/logrotate.d - etc/systemd/storm-backend-server.service usr/lib/systemd/system diff --git a/src/main/java/it/grid/storm/Constants.java b/src/main/java/it/grid/storm/Constants.java index ca61cbf93..448378341 100644 --- a/src/main/java/it/grid/storm/Constants.java +++ b/src/main/java/it/grid/storm/Constants.java @@ -35,7 +35,6 @@ public class Constants { private static final Logger log = LoggerFactory.getLogger(Constants.class); public static final Entry BE_VERSION; - public static final Entry NAMESPACE_VERSION; public static final Entry BE_OS_DISTRIBUTION; public static final Entry BE_OS_PLATFORM; public static final Entry BE_OS_KERNEL_RELEASE; @@ -49,7 +48,6 @@ private Constants() {} static { BE_VERSION = new Entry("BE-Version", Constants.class.getPackage().getImplementationVersion()); - NAMESPACE_VERSION = new Entry("Namespace-version", "1.5.0"); BE_OS_DISTRIBUTION = new Entry("BE-OS-Distribution", getDistribution()); Map map = getPlatformKernel(); BE_OS_PLATFORM = new Entry(BE_OS_PLATFORM_KEY, map.get(BE_OS_PLATFORM_KEY)); diff --git a/src/main/java/it/grid/storm/Main.java b/src/main/java/it/grid/storm/Main.java index b86c93c04..89dbceb6f 100644 --- 
a/src/main/java/it/grid/storm/Main.java +++ b/src/main/java/it/grid/storm/Main.java @@ -2,20 +2,58 @@ import static java.lang.System.exit; +import java.io.IOException; + +import javax.xml.parsers.ParserConfigurationException; + +import org.apache.commons.configuration.ConfigurationException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.w3c.dom.DOMException; +import org.xml.sax.SAXException; +import it.grid.storm.config.Configuration; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.startup.Bootstrap; import it.grid.storm.startup.BootstrapException; public class Main { private static final Logger log = LoggerFactory.getLogger(Main.class); + public static final String DEFAULT_CONFIG_DIR = "/etc/storm/backend-server"; + public static final String DEFAULT_CONFIG_FILE = DEFAULT_CONFIG_DIR + "/storm.properties"; + public static final String DEFAULT_NAMESPACE_FILE = DEFAULT_CONFIG_DIR + "/namespace.xml"; + public static final String DEFAULT_NAMESPACE_SCHEMA_FILE = + DEFAULT_CONFIG_DIR + "/namespace-1.5.0.xsd"; + public static final String DEFAULT_LOGGING_FILE = DEFAULT_CONFIG_DIR + "/logging.xml"; + private Main() {} public static void main(String[] args) { - StoRM storm = new StoRM(); + log.info("Configure logging from {} ...", DEFAULT_LOGGING_FILE); + Bootstrap.configureLogging(DEFAULT_LOGGING_FILE); + + log.info("Load configuration from {} ...", DEFAULT_CONFIG_FILE); + try { + Configuration.init(DEFAULT_CONFIG_FILE); + } catch (IOException e) { + log.error(e.getMessage(), e); + exit(1); + } + + log.info("Load namespace from {} ...", DEFAULT_NAMESPACE_FILE); + try { + Namespace.init(DEFAULT_NAMESPACE_FILE, true); + } catch (DOMException | ConfigurationException | ParserConfigurationException | SAXException + | IOException | NamespaceException e) { + log.error(e.getMessage(), e); + exit(1); + } + + StoRM storm = new StoRM(Configuration.getInstance(), Namespace.getInstance()); try 
{ storm.init(); diff --git a/src/main/java/it/grid/storm/ShutdownHook.java b/src/main/java/it/grid/storm/ShutdownHook.java index 09cb21c22..2dffea96f 100644 --- a/src/main/java/it/grid/storm/ShutdownHook.java +++ b/src/main/java/it/grid/storm/ShutdownHook.java @@ -28,6 +28,7 @@ public void run() { storm.stopSpaceGC(); storm.stopExpiredAgent(); storm.stopDiskUsageService(); + storm.stopRequestGarbageCollector(); GPFSQuotaManager.INSTANCE.shutdown(); log.info("StoRM: Backend successfully stopped."); diff --git a/src/main/java/it/grid/storm/StoRM.java b/src/main/java/it/grid/storm/StoRM.java index 2c4852fd3..8e3c7f9df 100644 --- a/src/main/java/it/grid/storm/StoRM.java +++ b/src/main/java/it/grid/storm/StoRM.java @@ -30,24 +30,34 @@ import it.grid.storm.asynch.AdvancedPicker; import it.grid.storm.catalogs.ReservedSpaceCatalog; -import it.grid.storm.catalogs.StoRMDataSource; -import it.grid.storm.catalogs.timertasks.ExpiredPutRequestsAgent; +import it.grid.storm.catalogs.executors.RequestFinalizerService; +import it.grid.storm.catalogs.timertasks.RequestsGarbageCollector; import it.grid.storm.check.CheckManager; import it.grid.storm.check.CheckResponse; import it.grid.storm.check.CheckStatus; import it.grid.storm.check.SimpleCheckManager; +import it.grid.storm.check.sanity.filesystem.SupportedFSType; import it.grid.storm.config.Configuration; import it.grid.storm.health.HealthDirector; import it.grid.storm.info.du.DiskUsageService; import it.grid.storm.metrics.StormMetricsReporter; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceInterface; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.model.Property; +import it.grid.storm.namespace.model.Property.SizeUnitType; +import it.grid.storm.namespace.model.Quota; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.rest.RestServer; 
+import it.grid.storm.space.SpaceHelper; +import it.grid.storm.space.gpfsquota.GPFSFilesetQuotaInfo; import it.grid.storm.space.gpfsquota.GPFSQuotaManager; +import it.grid.storm.space.gpfsquota.GetGPFSFilesetQuotaInfoCommand; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.startup.Bootstrap; import it.grid.storm.startup.BootstrapException; import it.grid.storm.synchcall.SimpleSynchcallDispatcher; +import it.grid.storm.util.GPFSSizeHelper; import it.grid.storm.xmlrpc.StoRMXmlRpcException; import it.grid.storm.xmlrpc.XMLRPCHttpServer; @@ -67,52 +77,69 @@ public class StoRM { private XMLRPCHttpServer xmlrpcServer; // Timer object in charge to call periodically the Space Garbage Collector - private final Timer gc = new Timer(); + private final Timer gc; private TimerTask cleaningTask; - private boolean isSpaceGCRunning = false; + private boolean isSpaceGCRunning; /* - * Timer object in charge of transit expired put requests from SRM_SPACE_AVAILABLE to - * SRM_FILE_LIFETIME_EXPIRED and from SRM_REQUEST_INPROGRESS to SRM_FAILURE + * Agent in charge of transit expired ptg/ptp/bol requests to final statuses */ - private final Timer transiter = new Timer(); - private TimerTask expiredAgent; - private boolean isExpiredAgentRunning = false; + private RequestFinalizerService expiredAgent; + private boolean isExpiredAgentRunning; - private boolean isDiskUsageServiceEnabled = false; - private DiskUsageService duService; + /* Requests Garbage Collector */ + private final Timer rgc; + private TimerTask rgcTask; + private boolean isRequestGCRunning; - private final ReservedSpaceCatalog spaceCatalog; + private boolean isDiskUsageServiceEnabled; + private DiskUsageService duService; - private boolean isPickerRunning = false; - private boolean isXmlrpcServerRunning = false; + private boolean isPickerRunning; + private boolean isXmlrpcServerRunning; - private boolean isRestServerRunning = false; + private boolean 
isRestServerRunning; private RestServer restServer; private final Configuration config; + private final ReservedSpaceCatalog spaceCatalog; + private final Namespace namespace; + + public StoRM(Configuration config, Namespace namespace) { + + this.config = config; + this.namespace = namespace; + this.spaceCatalog = ReservedSpaceCatalog.getInstance(); + + this.picker = new AdvancedPicker(); + this.isPickerRunning = false; + + this.isXmlrpcServerRunning = false; + + this.isRestServerRunning = false; - public StoRM() { + this.gc = new Timer(); + this.isSpaceGCRunning = false; + this.isExpiredAgentRunning = false; - config = Configuration.getInstance(); - picker = new AdvancedPicker(); - spaceCatalog = new ReservedSpaceCatalog(); + this.rgc = new Timer(); + this.isRequestGCRunning = false; + this.isDiskUsageServiceEnabled = false; } public void init() throws BootstrapException { - configureLogging(); - + // ==== From Namespace ==== + handleTotalOnlineSizeFromGPFSQuota(); + // Update SA within Reserved Space Catalog + updateSA(); + configureSecurity(); configureMetricsReporting(); - configureStoRMDataSource(); - - loadNamespaceConfiguration(); - - HealthDirector.initializeDirector(false); + HealthDirector.initializeDirector(); loadPathAuthzDBConfiguration(); @@ -128,11 +155,84 @@ public void init() throws BootstrapException { } - private void configureLogging() { + private void handleTotalOnlineSizeFromGPFSQuota() { + + namespace.getAllDefinedVFS().forEach(storageArea -> { + if (SupportedFSType.parseFS(storageArea.getFSType()) == SupportedFSType.GPFS) { + Quota quota = storageArea.getCapabilities().getQuota(); + if (quota != null && quota.getEnabled()) { + + GPFSFilesetQuotaInfo quotaInfo = getGPFSQuotaInfo(storageArea); + if (quotaInfo != null) { + updateTotalOnlineSizeFromGPFSQuota(storageArea, quotaInfo); + } + } + } + }); + } + + private GPFSFilesetQuotaInfo getGPFSQuotaInfo(VirtualFS storageArea) { + + GetGPFSFilesetQuotaInfoCommand cmd = new 
GetGPFSFilesetQuotaInfoCommand(storageArea); + + try { + return cmd.call(); + } catch (Throwable t) { + log.warn( + "Cannot get quota information out of GPFS. Using the TotalOnlineSize in namespace.xml " + + "for Storage Area {}. Reason: {}", + storageArea.getAliasName(), t.getMessage()); + return null; + } + } + + private void updateTotalOnlineSizeFromGPFSQuota(VirtualFS storageArea, + GPFSFilesetQuotaInfo quotaInfo) { + + long gpfsTotalOnlineSize = GPFSSizeHelper.getBytesFromKIB(quotaInfo.getBlockSoftLimit()); + Property newProperties = Property.from(storageArea.getProperties()); + try { + newProperties.setTotalOnlineSize(SizeUnitType.BYTE.getTypeName(), gpfsTotalOnlineSize); + storageArea.setProperties(newProperties); + log.warn("TotalOnlineSize as specified in namespace.xml will be ignored " + + "since quota is enabled on the GPFS {} Storage Area.", storageArea.getAliasName()); + } catch (NamespaceException e) { + log.warn( + "Cannot get quota information out of GPFS. Using the TotalOnlineSize in namespace.xml " + + "for Storage Area {}.", + storageArea.getAliasName(), e); + } + } + + private void updateSA() { + + SpaceHelper spaceHelp = new SpaceHelper(); + log.debug("Updating Space Catalog with Storage Area defined within NAMESPACE"); + namespace.getAllDefinedVFS().forEach(vfs ->{ + + String vfsAliasName = vfs.getAliasName(); + log.debug(" Considering VFS : {}", vfsAliasName); + String aliasName = vfs.getSpaceTokenDescription(); + if (aliasName == null) { + // Found a VFS without the optional element Space Token Description + log.debug( + "XMLNamespaceParser.UpdateSA() : Found a VFS ('{}') without space-token-description. 
" + + "Skipping the Update of SA", + vfsAliasName); + } else { + TSizeInBytes onlineSize = vfs.getProperties().getTotalOnlineSize(); + String spaceFileName = vfs.getRootPath(); + TSpaceToken spaceToken = spaceHelp.createVOSA_Token(aliasName, onlineSize, spaceFileName); + vfs.setSpaceToken(spaceToken); + + log.debug(" Updating SA ('{}'), token:'{}', onlineSize:'{}', spaceFileName:'{}'", aliasName, + spaceToken, onlineSize, spaceFileName); + } + + }); + spaceHelp.purgeOldVOSA_token(); + log.debug("Updating Space Catalog... DONE!!"); - String configurationDir = config.configurationDir(); - String logFile = configurationDir + "logging.xml"; - Bootstrap.configureLogging(logFile); } private void configureSecurity() { @@ -157,15 +257,9 @@ private void configureMetricsReporting() { } - private void loadNamespaceConfiguration() { - - NamespaceDirector.initializeDirector(); - - } - private void loadPathAuthzDBConfiguration() throws BootstrapException { - String pathAuthzDBFileName = config.configurationDir() + "path-authz.db"; + String pathAuthzDBFileName = config.getConfigurationDir() + "/path-authz.db"; Bootstrap.initializePathAuthz(pathAuthzDBFileName); } @@ -174,8 +268,8 @@ private void configureXMLRPCService() throws BootstrapException { try { - xmlrpcServer = new XMLRPCHttpServer(config.getXmlRpcServerPort(), config.getXMLRPCMaxThread(), - config.getXMLRPCMaxQueueSize()); + xmlrpcServer = new XMLRPCHttpServer(config.getXmlRpcServerPort(), config.getXmlrpcMaxThreads(), + config.getXmlrpcMaxQueueSize()); } catch (StoRMXmlRpcException e) { @@ -186,7 +280,7 @@ private void configureXMLRPCService() throws BootstrapException { private void performSanityChecks() throws BootstrapException { - if (config.getSanityCheckEnabled()) { + if (config.isSanityCheckEnabled()) { CheckManager checkManager = new SimpleCheckManager(); checkManager.init(); @@ -216,11 +310,6 @@ private void performSanityChecks() throws BootstrapException { } - private void configureStoRMDataSource() { - - 
StoRMDataSource.init(); - } - /** * Method used to start the picker. */ @@ -247,14 +336,6 @@ public synchronized void stopPicker() { isPickerRunning = false; } - /** - * @return - */ - public synchronized boolean pickerIsRunning() { - - return isPickerRunning; - } - /** * Method used to start xmlrpcServer. * @@ -286,11 +367,11 @@ public synchronized void stopXmlRpcServer() { private void configureRestService() { - int restServicePort = Configuration.getInstance().getRestServicesPort(); - boolean isTokenEnabled = Configuration.getInstance().getXmlRpcTokenEnabled(); - String token = Configuration.getInstance().getXmlRpcToken(); - int maxThreads = Configuration.getInstance().getRestServicesMaxThreads(); - int maxQueueSize = Configuration.getInstance().getRestServicesMaxQueueSize(); + int restServicePort = config.getRestServicesPort(); + boolean isTokenEnabled = config.isSecurityEnabled(); + String token = config.getSecurityToken(); + int maxThreads = config.getRestServicesMaxThreads(); + int maxQueueSize = config.getRestServicesMaxQueueSize(); restServer = new RestServer(restServicePort, maxThreads, maxQueueSize, isTokenEnabled, token); } @@ -343,11 +424,11 @@ public synchronized void startSpaceGC() { log.debug("Starting Space Garbage Collector ..."); // Delay time before starting - long delay = config.getCleaningInitialDelay() * 1000; + long delay = config.getExpiredSpacesAgentInitialDelay() * 1000; // cleaning thread! 
Set to 1 minute // Period of execution of cleaning - long period = config.getCleaningTimeInterval() * 1000; + long period = config.getExpiredSpacesAgentInterval() * 1000; // Set to 1 hour cleaningTask = new TimerTask() { @@ -382,14 +463,6 @@ public synchronized void stopSpaceGC() { isSpaceGCRunning = false; } - /** - * @return - */ - public synchronized boolean spaceGCIsRunning() { - - return isSpaceGCRunning; - } - /** * Starts the internal timer needed to periodically check and transit requests whose pinLifetime * has expired and are in SRM_SPACE_AVAILABLE, to SRM_FILE_LIFETIME_EXPIRED. Moreover, the @@ -405,16 +478,8 @@ public synchronized void startExpiredAgent() { return; } - /* Delay time before starting cleaning thread! Set to 1 minute */ - final long delay = config.getTransitInitialDelay() * 1000L; - /* Period of execution of cleaning! Set to 1 hour */ - final long period = config.getTransitTimeInterval() * 1000L; - /* Expiration time before starting move in-progress requests to failure */ - final long inProgressExpirationTime = config.getInProgressPutRequestExpirationTime(); - log.debug("Starting Expired Agent."); - expiredAgent = new ExpiredPutRequestsAgent(inProgressExpirationTime); - transiter.scheduleAtFixedRate(expiredAgent, delay, period); + expiredAgent = new RequestFinalizerService(config); isExpiredAgentRunning = true; log.debug("Expired Agent started."); } @@ -428,7 +493,7 @@ public synchronized void stopExpiredAgent() { log.debug("Stopping Expired Agent."); if (expiredAgent != null) { - expiredAgent.cancel(); + expiredAgent.stop(); } log.debug("Expired Agent stopped."); isExpiredAgentRunning = false; @@ -441,19 +506,20 @@ public synchronized boolean isExpiredAgentRunning() { private void configureDiskUsageService() { - isDiskUsageServiceEnabled = config.getDiskUsageServiceEnabled(); + isDiskUsageServiceEnabled = config.isDiskUsageServiceEnabled(); + int delay = config.getDiskUsageServiceInitialDelay(); + long period = 
config.getDiskUsageServiceTasksInterval(); - NamespaceInterface namespace = NamespaceDirector.getNamespace(); - List quotaEnabledVfs = namespace.getVFSWithQuotaEnabled(); - List sas = namespace.getAllDefinedVFS() + List quotaEnabledVfs = namespace.getVFSWithQuotaEnabled(); + List sas = namespace.getAllDefinedVFS() .stream() .filter(vfs -> !quotaEnabledVfs.contains(vfs)) .collect(Collectors.toList()); - if (config.getDiskUsageServiceTasksParallel()) { - duService = DiskUsageService.getScheduledThreadPoolService(sas); + if (config.isDiskUsageServiceTasksParallel()) { + duService = DiskUsageService.getScheduledThreadPoolService(sas, delay, period); } else { - duService = DiskUsageService.getSingleThreadScheduledService(sas); + duService = DiskUsageService.getSingleThreadScheduledService(sas, delay, period); } duService.setDelay(config.getDiskUsageServiceInitialDelay()); duService.setPeriod(config.getDiskUsageServiceTasksInterval()); @@ -498,6 +564,40 @@ public synchronized void stopDiskUsageService() { } } + public synchronized void startRequestGarbageCollector() { + + if (isRequestGCRunning) { + log.debug("Requests Garbage Collector is already running."); + return; + } + + /* Delay time before starting cleaning thread */ + final long delay = config.getCompletedRequestsAgentDelay() * 1000L; + /* Period of execution of cleaning */ + final long period = config.getCompletedRequestsAgentPeriod() * 1000L; + + log.debug("Starting Requests Garbage Collector ."); + rgcTask = new RequestsGarbageCollector(rgc, period); + rgc.schedule(rgcTask, delay); + isRequestGCRunning = true; + log.debug("Requests Garbage Collector started."); + } + + public synchronized void stopRequestGarbageCollector() { + + if (!isRequestGCRunning) { + log.debug("Requests Garbage Collector is not running."); + return; + } + + log.debug("Stopping Requests Garbage Collector."); + if (rgcTask != null) { + rgcTask.cancel(); + } + log.debug("Requests Garbage Collector stopped."); + isRequestGCRunning = false; 
+ } + public void startServices() throws Exception { startPicker(); @@ -505,6 +605,7 @@ public void startServices() throws Exception { startRestServer(); startSpaceGC(); startExpiredAgent(); + startRequestGarbageCollector(); startDiskUsageService(); } @@ -515,6 +616,7 @@ public void stopServices() { stopRestServer(); stopSpaceGC(); stopExpiredAgent(); + stopRequestGarbageCollector(); stopDiskUsageService(); GPFSQuotaManager.INSTANCE.shutdown(); diff --git a/src/main/java/it/grid/storm/acl/AclManagementInterface.java b/src/main/java/it/grid/storm/acl/AclManagementInterface.java deleted file mode 100644 index 08dbb1a10..000000000 --- a/src/main/java/it/grid/storm/acl/AclManagementInterface.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.acl; - -import it.grid.storm.filesystem.FilesystemPermission; -import it.grid.storm.filesystem.LocalFile; -import it.grid.storm.griduser.LocalUser; - -/** - * @author Michele Dibenedetto - * - */ - -public interface AclManagementInterface { - - /** - * Grants the provided permission on the provided file to the provided group - * - * @param localFile - * @param localUser a local user representing a group on the operating system - * @param permission - */ - void grantGroupPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission); - - /** - * Grants the provided permission on the provided file to the provided user - * - * @param localFile - * @param localUser a local user representing an user on the operating system - * @param permission - */ - void grantUserPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission); - - /** - * Removes all the permission eventually assigned to the provided group on the provided file - * - * @param localFile a local user representing a group on the operating system - * @param localUser - */ - void removeGroupPermission(LocalFile localFile, LocalUser localUser); - - /** - * Removes all the permission eventually assigned to the provided user on the provided file - * - * @param localFile - * @param localUser a local user representing an user on the operating system - */ - void removeUserPermission(LocalFile localFile, LocalUser localUser); - - /** - * Revokes the provided permission on the provided file to the provided group - * - * @param localFile - * @param localUser a local user representing a group on the operating system - * @param permission - */ - void revokeGroupPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission); - - /** - * Revokes the provided permission on the provided file to the provided user - * - * @param localFile - * @param localUser a local user representing an user on the operating system - * 
@param permission - */ - void revokeUserPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission); - - /** - * Sets the provided permission on the provided file to the provided group - * - * @param localFile - * @param localUser a local user representing a group on the operating system - * @param permission - */ - void setGroupPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission); - - /** - * Sets the provided permission on the provided file to the provided user - * - * @param localFile - * @param localUser a local user representing an user on the operating system - * @param permission - */ - void setUserPermission(LocalFile localFile, LocalUser localUser, FilesystemPermission permission); - - /** - * Removes all the permission from any user/group from the provided file - * - * @param localFile - */ - void removeAllPermissions(LocalFile localFile); - - /** - * Moves all the permission from any user/group from the provided fromLocalFile to the new - * toLocalFile (NOTE: can be assumed that toLocalFile has no ACL) - * - * @param fromLocalFile - * @param toLocalFile - */ - void moveAllPermissions(LocalFile fromLocalFile, LocalFile toLocalFile); - -} diff --git a/src/main/java/it/grid/storm/acl/AclManager.java b/src/main/java/it/grid/storm/acl/AclManager.java index 5a9f03de9..5eba59c74 100644 --- a/src/main/java/it/grid/storm/acl/AclManager.java +++ b/src/main/java/it/grid/storm/acl/AclManager.java @@ -114,60 +114,4 @@ FilesystemPermission setGroupPermission(LocalFile localFile, LocalUser localUser FilesystemPermission setUserPermission(LocalFile localFile, LocalUser localUser, FilesystemPermission permission) throws IllegalArgumentException; - /** - * @param localFile an existent file - * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to - * a not existent file - */ - void removeHttpsPermissions(LocalFile localFile) throws IllegalArgumentException; - - /** - * 
@param localFile an existent file - * @param localUser - * @param permission - * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to - * a not existent file - */ - void grantHttpsUserPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException; - - /** - * @param localFile an existent file - * @param localUser - * @param permission - * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to - * a not existent file - */ - void grantHttpsServiceGroupPermission(LocalFile localFile, FilesystemPermission permission) - throws IllegalArgumentException; - - /** - * @param localFile an existent file - * @param localUser - * @param permission - * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to - * a not existent file - */ - void grantHttpsGroupPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException; - - /** - * @param localFile an existent file - * @param localUser - * @param permission - * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to - * a not existent file - */ - void grantHttpsServiceUserPermission(LocalFile localFile, FilesystemPermission permission) - throws IllegalArgumentException; - - /** - * @param oldLocalFile an existent source file - * @param newLocalFile an existent destination file - * @throws IllegalArgumentException if received null parameters or the LocalFile objects refers to - * not existent files - */ - void moveHttpsPermissions(LocalFile oldLocalFile, LocalFile newLocalFile) - throws IllegalArgumentException; - } diff --git a/src/main/java/it/grid/storm/acl/AclManagerFS.java b/src/main/java/it/grid/storm/acl/AclManagerFS.java index 52f813bea..028c441d0 100644 --- a/src/main/java/it/grid/storm/acl/AclManagerFS.java +++ 
b/src/main/java/it/grid/storm/acl/AclManagerFS.java @@ -27,12 +27,6 @@ public static AclManager getInstance() { return instance; } - /* - * (non-Javadoc) - * - * @see it.grid.storm.acl.AclManager#grantGroupPermission(it.grid.storm.griduser .LocalUser, - * it.grid.storm.filesystem.FilesystemPermission) - */ @Override public FilesystemPermission grantGroupPermission(LocalFile localFile, LocalUser localUser, FilesystemPermission permission) throws IllegalArgumentException { @@ -50,12 +44,6 @@ public FilesystemPermission grantGroupPermission(LocalFile localFile, LocalUser return newPermission; } - /* - * (non-Javadoc) - * - * @see it.grid.storm.acl.AclManager#grantUserPermission(it.grid.storm.filesystem .LocalFile, - * it.grid.storm.griduser.LocalUser, it.grid.storm.filesystem.FilesystemPermission) - */ @Override public FilesystemPermission grantUserPermission(LocalFile localFile, LocalUser localUser, FilesystemPermission permission) throws IllegalArgumentException { @@ -175,68 +163,4 @@ public FilesystemPermission setUserPermission(LocalFile localFile, LocalUser loc return newPermission; } - @Override - public void removeHttpsPermissions(LocalFile localFile) throws IllegalArgumentException { - - if (localFile == null) { - throw new IllegalArgumentException( - "Unable to perform the operation. The received file parameter is null"); - } - } - - @Override - public void grantHttpsUserPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException { - - if (localFile == null || localUser == null || permission == null) { - throw new IllegalArgumentException( - "Unable to perform the operation. 
The received null parameters: localFile=" + localFile - + " localUser=" + localUser + " permission=" + permission); - } - } - - @Override - public void grantHttpsServiceUserPermission(LocalFile localFile, FilesystemPermission permission) - throws IllegalArgumentException { - - if (localFile == null || permission == null) { - throw new IllegalArgumentException( - "Unable to perform the operation. The received null parameters: localFile=" + localFile - + " permission=" + permission); - } - } - - @Override - public void grantHttpsGroupPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException { - - if (localFile == null || localUser == null || permission == null) { - throw new IllegalArgumentException( - "Unable to perform the operation. The received null parameters: localFile=" + localFile - + " localUser=" + localUser + " permission=" + permission); - } - } - - @Override - public void grantHttpsServiceGroupPermission(LocalFile localFile, FilesystemPermission permission) - throws IllegalArgumentException { - - if (localFile == null || permission == null) { - throw new IllegalArgumentException( - "Unable to perform the operation. The received null parameters: localFile=" + localFile - + " permission=" + permission); - } - } - - @Override - public void moveHttpsPermissions(LocalFile fromLocalFile, LocalFile toLocalFile) - throws IllegalArgumentException { - - if (fromLocalFile == null || toLocalFile == null) { - throw new IllegalArgumentException( - "Unable to perform the operation. 
The received null parameters: fromLocalFile=" - + fromLocalFile + " toLocalFile=" + toLocalFile); - } - } - } diff --git a/src/main/java/it/grid/storm/asynch/AdvancedPicker.java b/src/main/java/it/grid/storm/asynch/AdvancedPicker.java index c990a3c74..85192fe57 100644 --- a/src/main/java/it/grid/storm/asynch/AdvancedPicker.java +++ b/src/main/java/it/grid/storm/asynch/AdvancedPicker.java @@ -18,8 +18,8 @@ package it.grid.storm.asynch; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.config.Configuration; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.CrusherScheduler; import it.grid.storm.scheduler.SchedulerException; import it.grid.storm.scheduler.SchedulerStatus; @@ -53,10 +53,10 @@ public class AdvancedPicker { private TimerTask retrievingTask = null; /* delay time before starting retriever thread, in mssec */ - private final long delay = Configuration.getInstance().getPickingInitialDelay() * 1000; + private final long delay = Configuration.getInstance().getRequestsPickerAgentInitialDelay() * 1000; /* period of execution of retrieving, in mssec */ - private final long period = Configuration.getInstance().getPickingTimeInterval() * 1000; + private final long period = Configuration.getInstance().getRequestsPickerAgentInterval() * 1000; /* boolean that indicates there is a token to abort! */ private boolean abort = false; @@ -281,31 +281,4 @@ synchronized public boolean abortRequest(TRequestToken rt) { return true; } - /** - * Method used to remove chunks of the request identified by the supplied TRequestToken, with - * surls given by the collection c. Chunks in the DB get their status changed and so will not be - * considered for processing. - * - * If a null TRequestToken or Collection is supplied, or some other abort request has been issued, - * then FALSE is returned; otherwise TRUE is returned. 
- */ - synchronized public boolean abortChunksOfRequest(TRequestToken rt, Collection c) { - - if (abort) { - - return false; - } - - if ((rt == null) || (c == null)) { - - return false; - } - - abortToken = rt; - abortSURLS = c; - abort = true; - - return true; - } - } diff --git a/src/main/java/it/grid/storm/asynch/BoL.java b/src/main/java/it/grid/storm/asynch/BoL.java index cb37db2bb..9f3a324cd 100644 --- a/src/main/java/it/grid/storm/asynch/BoL.java +++ b/src/main/java/it/grid/storm/asynch/BoL.java @@ -17,11 +17,9 @@ package it.grid.storm.asynch; -import it.grid.storm.authz.AuthzDirector; -import it.grid.storm.authz.SpaceAuthzInterface; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.catalogs.BoLData; -import it.grid.storm.catalogs.RequestData; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; import it.grid.storm.common.types.SizeUnit; @@ -31,25 +29,22 @@ import it.grid.storm.griduser.AbstractGridUser; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; +import it.grid.storm.persistence.model.BoLData; +import it.grid.storm.persistence.model.RequestData; import it.grid.storm.scheduler.Chooser; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.Streets; -import it.grid.storm.space.SpaceHelper; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.tape.recalltable.TapeRecallCatalog; import 
it.grid.storm.tape.recalltable.model.TapeRecallStatus; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Class that represents a chunk of an srmBringOnLine request: it handles a single file of a * multifile/directory-expansion request. StoRM then sends the chunk to a chunk-scheduler. Security @@ -202,7 +197,7 @@ public void doIt() { StoRI fileStoRI = null; try { - fileStoRI = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl, gu); + fileStoRI = Namespace.getInstance().resolveStoRIbySURL(surl, gu); } catch (IllegalArgumentException e) { log.error( "Unable to build a stori for surl '{}' and user '{}'. " + "IllegalArgumentException: {}", @@ -231,20 +226,6 @@ public void doIt() { } } - SpaceHelper sp = new SpaceHelper(); - TSpaceToken token = sp.getTokenFromStoRI(log, fileStoRI); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - if (!spaceAuth.authorize(gu, SRMSpaceRequest.BOL)) { - String emsg = - String.format("Space authorization denied %s" + " in Storage Area: %s", surl, token); - log.debug(emsg); - requestData.changeStatusSRM_AUTHORIZATION_FAILURE(emsg); - failure = true; - printOutcome(dn, surl, requestData.getStatus()); - return; - } - manageIsPermit(fileStoRI); printOutcome(dn, surl, requestData.getStatus()); } diff --git a/src/main/java/it/grid/storm/asynch/BoLFeeder.java b/src/main/java/it/grid/storm/asynch/BoLFeeder.java index 483bfe179..c924b97c7 100644 --- a/src/main/java/it/grid/storm/asynch/BoLFeeder.java +++ b/src/main/java/it/grid/storm/asynch/BoLFeeder.java @@ -17,20 +17,25 @@ package it.grid.storm.asynch; +import java.util.Collection; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.catalogs.BoLChunkCatalog; -import it.grid.storm.catalogs.BoLPersistentChunkData; -import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import 
it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.InvalidDescendantsEmptyRequestException; import it.grid.storm.namespace.InvalidDescendantsFileRequestException; import it.grid.storm.namespace.InvalidDescendantsPathRequestException; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.model.BoLPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.SchedulerException; import it.grid.storm.srm.types.InvalidTDirOptionAttributesException; @@ -38,11 +43,6 @@ import it.grid.storm.srm.types.TSURL; import it.grid.storm.synchcall.data.DataHelper; -import java.util.Collection; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * This class represents a BringOnLine Feeder: the Feeder that will handle the srmBringOnLine * statements. It chops a multifile request, and for each part it checks whether the dir option is @@ -247,7 +247,7 @@ private void manageIsDirectory(BoLPersistentChunkData chunkData) { StoRI stori = null; try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl, gu); + stori = Namespace.getInstance().resolveStoRIbySURL(surl, gu); } catch (IllegalArgumentException e) { log.error( "Unable to build a stori for surl {} for user {}. 
" + "IllegalArgumentException: {}", diff --git a/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java b/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java index 5979c88a5..a3e145d25 100644 --- a/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java +++ b/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java @@ -18,9 +18,9 @@ package it.grid.storm.asynch; import it.grid.storm.catalogs.BoLChunkCatalog; -import it.grid.storm.catalogs.BoLPersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.BoLPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.PersistentRequestChunk; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; diff --git a/src/main/java/it/grid/storm/asynch/BuilderException.java b/src/main/java/it/grid/storm/asynch/BuilderException.java index 9513c5b8e..87e4f71f1 100644 --- a/src/main/java/it/grid/storm/asynch/BuilderException.java +++ b/src/main/java/it/grid/storm/asynch/BuilderException.java @@ -36,14 +36,4 @@ public BuilderException(String message) { super(message); } - - public BuilderException(Throwable cause) { - - super(cause); - } - - public BuilderException(String message, Throwable cause) { - - super(message, cause); - } } diff --git a/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java b/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java index 6deab60c4..f6ab96044 100644 --- a/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java +++ b/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java @@ -17,8 +17,8 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.ChunkData; import it.grid.storm.catalogs.RequestSummaryCatalog; +import it.grid.storm.persistence.model.ChunkData; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TReturnStatus; import 
it.grid.storm.srm.types.TStatusCode; diff --git a/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java deleted file mode 100644 index d56575f67..000000000 --- a/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.asynch; - -import it.grid.storm.catalogs.BoLPersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; -import it.grid.storm.griduser.GridUserInterface; - -/** - * This class represents an Exception thrown when a BoLChunk is created with any null attribute: - * GridUser, RequestSummaryData, BoLChunkData or GlobalStatusManager. 
- * - * @author: CNAF - * @version: 1.0 - * @date: Aug 2009 - */ -public class InvalidBoLChunkAttributesException extends Exception { - - private static final long serialVersionUID = 2320080131526579634L; - - private final boolean nullGu; // true if GridUser is null - private final boolean nullRsd; // true if RequestSummaryData is null - private final boolean nullChunkData; // true if BoLChunkData is null - private final boolean nullGlobalStatusManager; // true if gsm is null - - /** - * Constructor that requires the GridUser, RequestSummaryData, BoLChunkData and - * GlobalStatusManager that caused the exception to be thrown. - */ - public InvalidBoLChunkAttributesException(GridUserInterface gu, RequestSummaryData rsd, - BoLPersistentChunkData chunkData, GlobalStatusManager gsm) { - - nullGu = (gu == null); - nullRsd = (rsd == null); - nullChunkData = (chunkData == null); - nullGlobalStatusManager = (gsm == null); - } - - @Override - public String toString() { - - return String.format( - "Invalid attributes when creating BoLChunk: " - + "nullGridUser=%b; nullRequestSumamryData=%b; nullBoLChunkData=%b; " - + "nullGlobalStatusManager=%b", - nullGu, nullRsd, nullChunkData, nullGlobalStatusManager); - } -} diff --git a/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java index 8a3af9060..2a9039328 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java @@ -17,8 +17,8 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestSummaryData; /** * Class that represents an Exception thrown when a BoLFeeder could not be created because the diff --git a/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java 
b/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java index 9e55f6fe0..bef52a491 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java @@ -11,9 +11,9 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.PersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.PersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; /** * This class represents an Exceptin thrown when a PtPChunk is created with any null attribute: diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java deleted file mode 100644 index cf3333a4f..000000000 --- a/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java +++ /dev/null @@ -1,36 +0,0 @@ -package it.grid.storm.asynch; - -import it.grid.storm.catalogs.PtGData; -import it.grid.storm.griduser.GridUserInterface; - -/** - * @author Michele Dibenedetto - * - */ -public class InvalidPtGAttributesException extends Exception { - - /** - * - */ - private static final long serialVersionUID = 6957632945020144458L; - protected final boolean nullGu; // true if GridUser is null - protected final boolean nullChunkData; // true if PtGChunkData is null - - /** - * Constructor that requires the GridUser, RequestSummaryData, PtGChunkData and - * GlobalStatusManager that caused the exception to be thrown. 
- */ - public InvalidPtGAttributesException(GridUserInterface gu, PtGData chunkData) { - - nullGu = (gu == null); - nullChunkData = (chunkData == null); - } - - @Override - public String toString() { - - return String.format( - "Invalid attributes when creating PtG: " + "null-GridUser=%b, null-PtGChunkData=%b", nullGu, - nullChunkData); - } -} diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java deleted file mode 100644 index 12a830e62..000000000 --- a/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.asynch; - -import it.grid.storm.catalogs.RequestSummaryData; -import it.grid.storm.catalogs.PtGData; -import it.grid.storm.griduser.GridUserInterface; - -/** - * This class represents an Exceptin thrown when a PtGChunk is created with any null attribute: - * GridUser, RequestSummaryData, PtGChunkData or GlobalStatusManager. 
- * - * @author: EGRID - ICTP Trieste - * @version: 2.0 - * @date: May 16th, 2005 - */ -public class InvalidPtGChunkAttributesException extends InvalidPtGAttributesException { - - /** - * - */ - private static final long serialVersionUID = 754275707315797289L; - /** - * true if RequestSummaryData is null - */ - private final boolean nullRsd; - - /** - * true if gsm is null - */ - private final boolean nullGlobalStatusManager; - - /** - * Constructor that requires the GridUser, RequestSummaryData, PtGChunkData and - * GlobalStatusManager that caused the exception to be thrown. - */ - public InvalidPtGChunkAttributesException(GridUserInterface gu, RequestSummaryData rsd, - PtGData chunkData, GlobalStatusManager gsm) { - - super(gu, chunkData); - nullRsd = (rsd == null); - nullGlobalStatusManager = (gsm == null); - } - - @Override - public String toString() { - - return String.format( - "Invalid attributes when creating PtGChunk: " - + "null-GridUser=%b, null-RequestSumamryData=%b, null-PtGChunkData=%b, " - + "null-GlobalStatusManager=%b", - nullGu, nullRsd, nullChunkData, nullGlobalStatusManager); - } -} diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java index ce8793add..aacf3c43a 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java @@ -17,8 +17,8 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestSummaryData; /** * Class that represents an Exception thrown when a PtGFeeder could not be created because the diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java index 748715ae4..638b026f4 100644 --- 
a/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java @@ -17,8 +17,8 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestSummaryData; /** * Class that represents an Exception thrown when a PtPFeeder could not be created because the diff --git a/src/main/java/it/grid/storm/asynch/InvalidPutReplyAttributeException.java b/src/main/java/it/grid/storm/asynch/InvalidPutReplyAttributeException.java deleted file mode 100644 index cdea7b83c..000000000 --- a/src/main/java/it/grid/storm/asynch/InvalidPutReplyAttributeException.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.asynch; - -/** - * Class that represents an exception thrown when an SRMPrepareToPutReply cannot be created because - * the supplied TRequestToken is null. 
- * - * @author EGRID - ICTP Trieste - * @version 1.0 - * @date September, 2005 - */ -public class InvalidPutReplyAttributeException extends Exception { - - /** - * - */ - private static final long serialVersionUID = 1L; - - @Override - public String toString() { - - return "null TRequestToken"; - } -} diff --git a/src/main/java/it/grid/storm/asynch/InvalidPutStatusAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPutStatusAttributesException.java deleted file mode 100644 index b7302de88..000000000 --- a/src/main/java/it/grid/storm/asynch/InvalidPutStatusAttributesException.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.asynch; - -import it.grid.storm.srm.types.TTURL; -import it.grid.storm.srm.types.TReturnStatus; - -/** - * Class that represents an exception thrown when an SRMStatusOfPutRequestReply cannot be created - * because the supplied toTURL or returnStatus are null. 
- * - * @author EGRID - ICTP Trieste - * @version 1.0 - * @date October, 2005 - */ -public class InvalidPutStatusAttributesException extends Exception { - - /** - * - */ - private static final long serialVersionUID = 1L; - - // boolean indicating whether the supplied TURL is null or not - private final boolean nullToTURL; - - // boolean indicating whether the supplied TReturnStatus is null or not - private final boolean nullReturnStatus; - - /** - * Constructor that requires the attributes that caused the exception to be thrown. - */ - public InvalidPutStatusAttributesException(TTURL toTURL, TReturnStatus returnStatus) { - - nullToTURL = (toTURL == null); - nullReturnStatus = (returnStatus == null); - } - - @Override - public String toString() { - - return String.format("nullToTURL=%b; nullReturnStatus=%b", nullToTURL, nullReturnStatus); - } -} diff --git a/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java index ae0279eb7..fd7fc4522 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java @@ -11,8 +11,8 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestData; /** * @author Michele Dibenedetto diff --git a/src/main/java/it/grid/storm/asynch/PtG.java b/src/main/java/it/grid/storm/asynch/PtG.java index 260a5c621..a8972accb 100644 --- a/src/main/java/it/grid/storm/asynch/PtG.java +++ b/src/main/java/it/grid/storm/asynch/PtG.java @@ -11,13 +11,17 @@ package it.grid.storm.asynch; +import java.util.Arrays; +import java.util.Calendar; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.acl.AclManagerFS; import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; 
-import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.catalogs.PtGData; import it.grid.storm.catalogs.VolatileAndJiTCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; @@ -32,24 +36,23 @@ import it.grid.storm.griduser.LocalUser; import it.grid.storm.namespace.InvalidGetTURLProtocolException; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.TURLBuildingException; import it.grid.storm.namespace.UnapprochableSurlException; -import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.model.ACLEntry; import it.grid.storm.namespace.model.DefaultACL; import it.grid.storm.namespace.model.Protocol; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.PtGData; import it.grid.storm.scheduler.Chooser; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.Streets; -import it.grid.storm.space.SpaceHelper; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.srm.types.TTURL; import it.grid.storm.synchcall.command.CommandHelper; @@ -58,13 +61,6 @@ import it.grid.storm.tape.recalltable.TapeRecallCatalog; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; -import java.util.Arrays; -import java.util.Calendar; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public class PtG implements 
Delegable, Chooser, Request, Suspendedable { protected static final String SRM_COMMAND = "srmPrepareToGet"; @@ -112,7 +108,7 @@ public PtG(PtGData reqData) throws IllegalArgumentException { requestData = reqData; start = Calendar.getInstance(); - if (Configuration.getInstance().getPTGSkipACLSetup()) { + if (Configuration.getInstance().isSkipPtgACLSetup()) { setupACLs = false; log.debug("Skipping ACL setup on PTG as requested by configuration."); } @@ -146,7 +142,7 @@ public void doIt() { try { if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { try { - fileStoRI = NamespaceDirector.getNamespace() + fileStoRI = Namespace.getInstance() .resolveStoRIbySURL(surl, ((IdentityInputData) requestData).getUser()); } catch (UnapprochableSurlException e) { unapprochableSurl = true; @@ -165,7 +161,7 @@ public void doIt() { } } else { try { - fileStoRI = NamespaceDirector.getNamespace().resolveStoRIbySURL(requestData.getSURL()); + fileStoRI = Namespace.getInstance().resolveStoRIbySURL(requestData.getSURL()); } catch (UnapprochableSurlException e) { failure = true; log.info("Unable to build a stori for surl {}. " + "UnapprochableSurlException: {}", surl, @@ -202,7 +198,7 @@ public void doIt() { } else { if (requestData.getTransferProtocols().allows(Protocol.HTTP)) { try { - fileStoRI = NamespaceDirector.getNamespace().resolveStoRIbySURL(requestData.getSURL()); + fileStoRI = Namespace.getInstance().resolveStoRIbySURL(requestData.getSURL()); } catch (UnapprochableSurlException e) { failure = true; log.info("Unable to build a stori for surl {}. 
" + "UnapprochableSurlException: {}", @@ -266,214 +262,196 @@ private void downgradeToAnonymousHttpRequest() { */ private void manageIsPermit(StoRI fileStoRI) { - TSpaceToken token = new SpaceHelper().getTokenFromStoRI(log, fileStoRI); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); + if ((!fileStoRI.getLocalFile().exists()) || (fileStoRI.getLocalFile().isDirectory())) { + /* File does not exist, or it is a directory! Fail request with SRM_INVALID_PATH */ + requestData.changeStatusSRM_INVALID_PATH( + "The requested file either does not exist, or it is a directory!"); + failure = true; + log.debug("ANOMALY in PtGChunk! PolicyCollector confirms read rights on" + + " file, yet file does not exist physically! Or, an srmPrepareToGet" + + " was attempted on a directory!"); + return; + } - boolean isSpaceAuthorized; - if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { - isSpaceAuthorized = - spaceAuth.authorize(((IdentityInputData) requestData).getUser(), SRMSpaceRequest.PTG); - } else { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.PTG); + /* File exists and it is not a directory */ + /* Sets traverse permissions on file parent folders */ + boolean canTraverse; + try { + canTraverse = managePermitTraverseStep(fileStoRI); + } catch (CannotMapUserException e) { + String explanation = "Unable to find local user for " + DataHelper.getRequestor(requestData); + requestData.changeStatusSRM_FAILURE(explanation); + failure = true; + log.error("{}! CannotMapUserException: {}", explanation, e.getMessage(), e); + return; + } + + if (!canTraverse) { + String explanation = "Cannot travers parents"; + requestData.changeStatusSRM_FAILURE(explanation); + log.error(explanation); + failure = true; + return; } - if (isSpaceAuthorized) { + + try { + + TTURL turl; try { - if ((!fileStoRI.getLocalFile().exists()) || (fileStoRI.getLocalFile().isDirectory())) { - /* - * File does not exist, or it is a directory! 
Fail request with SRM_INVALID_PATH! - */ - requestData.changeStatusSRM_INVALID_PATH( - "The requested file either" + " does not exist, or it is a directory!"); + turl = fileStoRI.getTURL(requestData.getTransferProtocols()); + } catch (TURLBuildingException e) { + requestData + .changeStatusSRM_FAILURE("Unable to build the TURL for the provided transfer protocol"); + failure = true; + log.error("ERROR in PtGChunk! There was a failure building the " + + "TURL. : TURLBuildingException {}", e.getMessage(), e); + return; + } catch (IllegalArgumentException e) { + /* + * Handle null TURL prefix! This is a programming error: it should not occur! + */ + requestData.changeStatusSRM_FAILURE("Unable to decide TURL!"); + failure = true; + log.error( + "ERROR in PtGChunk! invalid TURLPrefix in PtGChunkData " + + "caused StoRI to be unable to establish TTURL! " + "IllegalArgumentException: {}", + e.getMessage(), e); + return; + } catch (InvalidGetTURLProtocolException e) { + requestData.changeStatusSRM_FAILURE("Unable to decide TURL!"); + failure = true; + log.error("ERROR in PtGChunk! invalid TURL Protocol in PtGChunkData " + + "caused StoRI to be unable to establish TTURL! " + + "InvalidGetTURLProtocolException: {}", e.getMessage(), e); + return; + } + if (fileStoRI.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { + /* Compute the Expiration Time in seconds */ + long expDate = (System.currentTimeMillis() / 1000 + requestData.getPinLifeTime().value()); + StormEA.setPinned(fileStoRI.getLocalFile().getAbsolutePath(), expDate); + + + try { + TSizeInBytes fileSize = + TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES); + + requestData.setFileSize(fileSize); + log.debug("File size: {}", fileSize); + + } catch (InvalidTSizeAttributesException e) { + requestData.changeStatusSRM_FAILURE("Unable to determine file size"); failure = true; - log.debug("ANOMALY in PtGChunk! 
PolicyCollector confirms read rights on" - + " file, yet file does not exist physically! Or, an srmPrepareToGet" - + " was attempted on a directory!"); - } else { - /* File exists and it is not a directory */ - /* Sets traverse permissions on file parent folders */ - boolean canTraverse; + log.error("ERROR in PtGChunk! error in file size computation! " + + "InvalidTSizeAttributesException: {}", e.getMessage(), e); + return; + } + } + boolean isOnDisk; + try { + isOnDisk = isStoriOndisk(fileStoRI); + } catch (FSException e) { + requestData.changeStatusSRM_FAILURE("Unable to verify file disk status"); + failure = true; + log.error("ERROR in PtGChunk! error in file on disk check! " + "FSException: {}", + e.getMessage(), e); + return; + } + if (!isOnDisk && fileStoRI.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { + requestData.changeStatusSRM_REQUEST_INPROGRESS("Recalling" + " file from tape"); + String voName = null; + if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { + if (((IdentityInputData) requestData).getUser() instanceof AbstractGridUser) { + voName = + ((AbstractGridUser) ((IdentityInputData) requestData).getUser()).getVO().getValue(); + } + } + try { + new TapeRecallCatalog().insertTask(this, voName, + fileStoRI.getLocalFile().getAbsolutePath()); + } catch (DataAccessException e) { + requestData.changeStatusSRM_FAILURE("Unable to request file recall from tape"); + failure = true; + log.error("ERROR in PtGChunk! error in tape recall task " + + "insertion! 
DataAccessException: {}", e.getMessage(), e); + return; + } + /* Stores the parameters in this object */ + if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { try { - canTraverse = managePermitTraverseStep(fileStoRI); + backupData(fileStoRI, fileStoRI.getLocalFile(), + ((IdentityInputData) requestData).getUser().getLocalUser(), turl); } catch (CannotMapUserException e) { requestData.changeStatusSRM_FAILURE( "Unable to find local user for " + DataHelper.getRequestor(requestData)); failure = true; log.error( - "ERROR in PtGChunk! Unable to find LocalUser for {}! " - + "CannotMapUserException: {}", + "ERROR in PtGChunk! Unable to find LocalUser " + + "for {}! CannotMapUserException: {}", DataHelper.getRequestor(requestData), e.getMessage(), e); return; } - if (canTraverse) { - TTURL turl; - try { - turl = fileStoRI.getTURL(requestData.getTransferProtocols()); - } catch (TURLBuildingException e) { - requestData.changeStatusSRM_FAILURE( - "Unable to build the TURL for the provided transfer protocol"); - failure = true; - log.error("ERROR in PtGChunk! There was a failure building the " - + "TURL. : TURLBuildingException {}", e.getMessage(), e); - return; - } catch (IllegalArgumentException e) { - /* - * Handle null TURL prefix! This is a programming error: it should not occur! - */ - requestData.changeStatusSRM_FAILURE("Unable to decide TURL!"); - failure = true; - log.error("ERROR in PtGChunk! invalid TURLPrefix in PtGChunkData " - + "caused StoRI to be unable to establish TTURL! " - + "IllegalArgumentException: {}", e.getMessage(), e); - return; - } catch (InvalidGetTURLProtocolException e) { - requestData.changeStatusSRM_FAILURE("Unable to decide TURL!"); - failure = true; - log.error("ERROR in PtGChunk! invalid TURL Protocol in PtGChunkData " - + "caused StoRI to be unable to establish TTURL! 
" - + "InvalidGetTURLProtocolException: {}", e.getMessage(), e); - return; - } - if (fileStoRI.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { - /* Compute the Expiration Time in seconds */ - long expDate = - (System.currentTimeMillis() / 1000 + requestData.getPinLifeTime().value()); - StormEA.setPinned(fileStoRI.getLocalFile().getAbsolutePath(), expDate); - - - try { - TSizeInBytes fileSize = - TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES); - - requestData.setFileSize(fileSize); - log.debug("File size: {}", fileSize); - - } catch (InvalidTSizeAttributesException e) { - requestData.changeStatusSRM_FAILURE("Unable to determine file size"); - failure = true; - log.error("ERROR in PtGChunk! error in file size computation! " - + "InvalidTSizeAttributesException: {}", e.getMessage(), e); - return; - } - } - boolean isOnDisk; - try { - isOnDisk = isStoriOndisk(fileStoRI); - } catch (FSException e) { - requestData.changeStatusSRM_FAILURE("Unable to verify file disk status"); - failure = true; - log.error("ERROR in PtGChunk! error in file on disk check! " + "FSException: {}", - e.getMessage(), e); - return; - } - if (!isOnDisk - && fileStoRI.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { - requestData.changeStatusSRM_REQUEST_INPROGRESS("Recalling" + " file from tape"); - String voName = null; - if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { - if (((IdentityInputData) requestData).getUser() instanceof AbstractGridUser) { - voName = ((AbstractGridUser) ((IdentityInputData) requestData).getUser()).getVO() - .getValue(); - } - } - try { - new TapeRecallCatalog().insertTask(this, voName, - fileStoRI.getLocalFile().getAbsolutePath()); - } catch (DataAccessException e) { - requestData.changeStatusSRM_FAILURE("Unable to request file recall from tape"); - failure = true; - log.error("ERROR in PtGChunk! error in tape recall task " - + "insertion! 
DataAccessException: {}", e.getMessage(), e); - return; - } - /* Stores the parameters in this object */ - if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { - try { - backupData(fileStoRI, fileStoRI.getLocalFile(), - ((IdentityInputData) requestData).getUser().getLocalUser(), turl); - } catch (CannotMapUserException e) { - requestData.changeStatusSRM_FAILURE( - "Unable to find local user for " + DataHelper.getRequestor(requestData)); - failure = true; - log.error( - "ERROR in PtGChunk! Unable to find LocalUser " - + "for {}! CannotMapUserException: {}", - DataHelper.getRequestor(requestData), e.getMessage(), e); - return; - } - } else { - backupData(fileStoRI, fileStoRI.getLocalFile(), null, turl); - } - - /* - * The request now ends by saving in the DB the IN_PROGRESS status information. The - * effective PtG will be accomplished when the setTaskStatus() method of the - * tapeRecallDAO calls the completeRequest() method. - */ - } else { - /* - * Set the read permission for the user on the localfile and any default ace specified - * in the story files - */ - boolean canRead; - try { - canRead = managePermitReadFileStep(fileStoRI, turl); - } catch (CannotMapUserException e) { - requestData.changeStatusSRM_FAILURE( - "Unable to find local user for " + DataHelper.getRequestor(requestData)); - failure = true; - log.error( - "ERROR in PtGChunk! Unable to find LocalUser for {}! " - + "CannotMapUserException: {}", - DataHelper.getRequestor(requestData), e.getMessage(), e); - return; - } - if (canRead) { - - try { - TSizeInBytes fileSize = - TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES); - - requestData.setFileSize(fileSize); - log.debug("File size: {}", fileSize); - - } catch (InvalidTSizeAttributesException e) { - requestData.changeStatusSRM_FAILURE("Unable to determine file size"); - failure = true; - log.error("ERROR in PtGChunk! error in file size computation! 
" - + "InvalidTSizeAttributesException: {}", e.getMessage(), e); - return; - } - - requestData.setTransferURL(turl); - requestData.changeStatusSRM_FILE_PINNED("srmPrepareToGet successfully handled!"); - } else { - requestData.changeStatusSRM_FAILURE( - "Local filesystem mask does not allow" + " setting up correct ACLs for PtG!"); - } - } - } else { - // FIXME roll back Read, and Traverse URGENT! - } + } else { + backupData(fileStoRI, fileStoRI.getLocalFile(), null, turl); } - } catch (SecurityException e) { + /* - * The check for existence of the File failed because there is a SecurityManager installed - * that denies read privileges for that File! Perhaps the local system administrator of - * StoRM set up Java policies that contrast policies described by the PolicyCollector! There - * is a conflict here! + * The request now ends by saving in the DB the IN_PROGRESS status information. The + * effective PtG will be accomplished when the setTaskStatus() method of the tapeRecallDAO + * calls the completeRequest() method. */ - requestData.changeStatusSRM_FAILURE("StoRM is not allowed to work on " + "requested file!"); - failure = true; - log.error("ATTENTION in PtGChunk! PtGChunk received a SecurityException " - + "from Java SecurityManager; StoRM cannot check-existence or " - + "check-if-directory for: {}", fileStoRI.getLocalFile().toString(), e); + } else { + /* + * Set the read permission for the user on the localfile and any default ace specified in + * the story files + */ + boolean canRead; + try { + canRead = managePermitReadFileStep(fileStoRI, turl); + } catch (CannotMapUserException e) { + requestData.changeStatusSRM_FAILURE( + "Unable to find local user for " + DataHelper.getRequestor(requestData)); + failure = true; + log.error( + "ERROR in PtGChunk! Unable to find LocalUser for {}! 
" + "CannotMapUserException: {}", + DataHelper.getRequestor(requestData), e.getMessage(), e); + return; + } + if (canRead) { + + try { + TSizeInBytes fileSize = + TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES); + + requestData.setFileSize(fileSize); + log.debug("File size: {}", fileSize); + + } catch (InvalidTSizeAttributesException e) { + requestData.changeStatusSRM_FAILURE("Unable to determine file size"); + failure = true; + log.error("ERROR in PtGChunk! error in file size computation! " + + "InvalidTSizeAttributesException: {}", e.getMessage(), e); + return; + } + + requestData.setTransferURL(turl); + requestData.changeStatusSRM_FILE_PINNED("srmPrepareToGet successfully handled!"); + } else { + requestData.changeStatusSRM_FAILURE( + "Local filesystem mask does not allow" + " setting up correct ACLs for PtG!"); + } } - } else { - String emsg = String.format("Read access to %s in Storage Area: %s " + "denied!", - requestData.getSURL(), token); - requestData.changeStatusSRM_AUTHORIZATION_FAILURE(emsg); + } catch (SecurityException e) { + /* + * The check for existence of the File failed because there is a SecurityManager installed + * that denies read privileges for that File! Perhaps the local system administrator of StoRM + * set up Java policies that contrast policies described by the PolicyCollector! There is a + * conflict here! + */ + requestData.changeStatusSRM_FAILURE("StoRM is not allowed to work on " + "requested file!"); failure = true; - log.debug(emsg); + log.error("ATTENTION in PtGChunk! 
PtGChunk received a SecurityException " + + "from Java SecurityManager; StoRM cannot check-existence or " + + "check-if-directory for: {}", fileStoRI.getLocalFile().toString(), e); } } @@ -491,24 +469,13 @@ private void manageIsPermit(StoRI fileStoRI) { private boolean managePermitTraverseStep(StoRI fileStoRI) throws CannotMapUserException { - if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { - - if (!setupACLs) - return verifyPath(fileStoRI); + if (!downgradedToAnonymous && requestData instanceof IdentityInputData && setupACLs) { return verifyPath(fileStoRI) && setParentsAcl(fileStoRI, ((IdentityInputData) requestData).getUser().getLocalUser()); } - if (verifyPath(fileStoRI)) { - - if (setupACLs) - setHttpsServiceParentAcl(fileStoRI); - - return true; - } - - return false; + return verifyPath(fileStoRI); } private boolean verifyPath(StoRI fileStoRI) { @@ -582,7 +549,6 @@ private boolean managePermitReadFileStep(StoRI fileStoRI, TTURL turl) if (setupACLs) { setDefaultAcl(fileStoRI, fileStoRI.getLocalFile()); - setHttpsServiceAcl(fileStoRI.getLocalFile(), FilesystemPermission.Read); } return true; @@ -699,33 +665,10 @@ private boolean setAoTAcl(StoRI fileStori, LocalUser localUser, FilesystemPermis return true; } - private void setHttpsServiceParentAcl(StoRI fileStoRI) { - - log.debug("Adding parent https ACL for directory : '{}' parents", fileStoRI.getAbsolutePath()); - - for (StoRI parentStoRI : fileStoRI.getParents()) { - setHttpsServiceAcl(parentStoRI.getLocalFile(), FilesystemPermission.Traverse); - } - } - - private void setHttpsServiceAcl(LocalFile file, FilesystemPermission permission) { - - log.debug("Adding https ACL {} for directory : '{}'", permission, file); - - try { - AclManagerFS.getInstance().grantHttpsServiceGroupPermission(file, permission); - } catch (IllegalArgumentException e) { - log.error("Unable to grant user permission on the created folder. 
" - + "IllegalArgumentException: {}", e.getMessage(), e); - requestData.getStatus() - .extendExplaination("Unable to grant group permission on the created folder"); - } - } - private void setDefaultAcl(StoRI fileStoRI, LocalFile localFile) { /* Manage DefaultACL */ - VirtualFSInterface vfs = fileStoRI.getVirtualFileSystem(); + VirtualFS vfs = fileStoRI.getVirtualFileSystem(); DefaultACL acl = vfs.getCapabilities().getDefaultACL(); if ((acl == null) || (acl.isEmpty())) { diff --git a/src/main/java/it/grid/storm/asynch/PtGBuilder.java b/src/main/java/it/grid/storm/asynch/PtGBuilder.java index e9d942105..f7915ae5d 100644 --- a/src/main/java/it/grid/storm/asynch/PtGBuilder.java +++ b/src/main/java/it/grid/storm/asynch/PtGBuilder.java @@ -17,15 +17,16 @@ package it.grid.storm.asynch; -import it.grid.storm.asynch.BuilderException; -import it.grid.storm.asynch.PtG; -import it.grid.storm.catalogs.AnonymousPtGData; -import it.grid.storm.catalogs.IdentityPtGData; -import it.grid.storm.catalogs.InvalidFileTransferDataAttributesException; -import it.grid.storm.catalogs.InvalidPtGDataAttributesException; -import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException; -import it.grid.storm.catalogs.PtGData; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.model.AnonymousPtGData; +import it.grid.storm.persistence.model.IdentityPtGData; +import it.grid.storm.persistence.model.PtGData; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TReturnStatus; @@ -35,8 +36,6 @@ import it.grid.storm.srm.types.TTURL; import 
it.grid.storm.synchcall.data.IdentityInputData; import it.grid.storm.synchcall.data.datatransfer.FileTransferInputData; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * @author Michele Dibenedetto diff --git a/src/main/java/it/grid/storm/asynch/PtGFeeder.java b/src/main/java/it/grid/storm/asynch/PtGFeeder.java index 0a73c85d1..ca90eaddf 100644 --- a/src/main/java/it/grid/storm/asynch/PtGFeeder.java +++ b/src/main/java/it/grid/storm/asynch/PtGFeeder.java @@ -17,19 +17,24 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException; +import java.util.Collection; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.catalogs.PtGChunkCatalog; -import it.grid.storm.catalogs.PtGPersistentChunkData; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.namespace.InvalidDescendantsEmptyRequestException; import it.grid.storm.namespace.InvalidDescendantsFileRequestException; import it.grid.storm.namespace.InvalidDescendantsPathRequestException; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.model.PtGPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.SchedulerException; import it.grid.storm.srm.types.InvalidTDirOptionAttributesException; @@ -37,11 +42,6 @@ import it.grid.storm.srm.types.TSURL; import it.grid.storm.synchcall.data.DataHelper; -import java.util.Collection; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * This class 
represents a PrepareToGet Feeder: the Feeder that will handle the srmPrepareToGet * statements. It chops a multifile request, and for each part it checks whether the dir option is @@ -263,7 +263,7 @@ private void manageIsDirectory(PtGPersistentChunkData chunkData) { /* Build StoRI for current chunk */ StoRI stori = null; try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl, chunkData.getUser()); + stori = Namespace.getInstance().resolveStoRIbySURL(surl, chunkData.getUser()); } catch (IllegalArgumentException e) { log.error( "Unable to build a stori for surl {} for user {}. " + "IllegalArgumentException: {}", diff --git a/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java b/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java index cba0e3227..40a6dca6d 100644 --- a/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java +++ b/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java @@ -19,9 +19,9 @@ import java.util.Arrays; import it.grid.storm.catalogs.PtGChunkCatalog; -import it.grid.storm.catalogs.PtGData; -import it.grid.storm.catalogs.PtGPersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; +import it.grid.storm.persistence.model.PtGData; +import it.grid.storm.persistence.model.PtGPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.PersistentRequestChunk; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.CommandHelper; diff --git a/src/main/java/it/grid/storm/asynch/PtP.java b/src/main/java/it/grid/storm/asynch/PtP.java index 257aedb8f..6f096dec3 100644 --- a/src/main/java/it/grid/storm/asynch/PtP.java +++ b/src/main/java/it/grid/storm/asynch/PtP.java @@ -22,10 +22,7 @@ import it.grid.storm.acl.AclManagerFS; import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; -import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; -import 
it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.catalogs.PtPData; import it.grid.storm.catalogs.ReservedSpaceCatalog; import it.grid.storm.catalogs.VolatileAndJiTCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; @@ -40,15 +37,16 @@ import it.grid.storm.namespace.ExpiredSpaceTokenException; import it.grid.storm.namespace.InvalidGetTURLProtocolException; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.TURLBuildingException; import it.grid.storm.namespace.UnapprochableSurlException; -import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.model.ACLEntry; import it.grid.storm.namespace.model.DefaultACL; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.PtPData; import it.grid.storm.persistence.model.TransferObjectDecodingException; import it.grid.storm.scheduler.Chooser; import it.grid.storm.scheduler.Delegable; @@ -183,10 +181,10 @@ public void doIt() { try { if (requestData instanceof IdentityInputData) { - fileStoRI = NamespaceDirector.getNamespace() + fileStoRI = Namespace.getInstance() .resolveStoRIbySURL(surl, ((IdentityInputData) requestData).getUser()); } else { - fileStoRI = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl); + fileStoRI = Namespace.getInstance().resolveStoRIbySURL(surl); } } catch (UnapprochableSurlException e) { @@ -332,24 +330,6 @@ private void manageOverwriteExistingFile(StoRI fileStoRI) { */ private void managePermit(StoRI fileStoRI) { - TSpaceToken token = new SpaceHelper().getTokenFromStoRI(PtP.log, fileStoRI); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - boolean isSpaceAuthorized; - if (requestData instanceof 
IdentityInputData) { - isSpaceAuthorized = - spaceAuth.authorize(((IdentityInputData) requestData).getUser(), SRMSpaceRequest.PTP); - } else { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.PTP); - } - if (!isSpaceAuthorized) { - requestData.changeStatusSRM_AUTHORIZATION_FAILURE("Create/Write access for " - + requestData.getSURL() + " in Storage Area: " + token + " denied!"); - failure = true; - log.debug("Create/Write access for {} in Storage Area: {} denied!", requestData.getSURL(), - token); - return; - } TTURL auxTURL; try { auxTURL = fileStoRI.getTURL(requestData.getTransferProtocols()); @@ -445,7 +425,6 @@ private boolean managePermitTraverseStep(StoRI fileStoRI) throws CannotMapUserEx return setParentAcl(fileStoRI, user); } - setHttpsServiceParentAcl(fileStoRI); return true; } @@ -468,7 +447,7 @@ private boolean preparePath(StoRI fileStoRI) { private boolean prepareDirectory(LocalFile dir) { boolean automaticDirectoryCreation = - Configuration.getInstance().getAutomaticDirectoryCreation(); + Configuration.getInstance().isAutomaticDirectoryCreationEnabled(); if (dir.exists()) { if (!dir.isDirectory()) { @@ -506,16 +485,16 @@ private boolean prepareDirectory(LocalFile dir) { private void updateUsedSpace(LocalFile dir) { - VirtualFSInterface vfs; + VirtualFS vfs; try { - vfs = NamespaceDirector.getNamespace().resolveVFSbyLocalFile(dir); + vfs = Namespace.getInstance().resolveVFSbyLocalFile(dir); } catch (NamespaceException e) { log.error("srmPtP: Error during used space update - {}", e.getMessage()); return; } long size = dir.getSize(); log.debug("srmPtP: Update {} used space [+ {}]", vfs.getAliasName(), size); - vfs.increaseUsedSpace(size); + vfs.getSpaceUpdater().increaseUsedSpace(size); } private boolean setParentAcl(StoRI fileStoRI, LocalUser localUser) { @@ -559,7 +538,6 @@ private boolean managePermitSetFileStep(StoRI fileStoRI) throws CannotMapUserExc setDefaultAcl(fileStoRI); setTapeManagementAcl(fileStoRI); - 
setHttpsServiceAcl(fileStoRI.getLocalFile(), FilesystemPermission.ReadWrite); return true; } @@ -674,29 +652,6 @@ private boolean setAoTAcl(StoRI fileStori, LocalUser localUser, FilesystemPermis return response; } - private void setHttpsServiceParentAcl(StoRI fileStoRI) { - - log.debug("SrmMkdir: Adding parent https ACL for directory: '{}' parents", - fileStoRI.getAbsolutePath()); - for (StoRI parentStoRI : fileStoRI.getParents()) { - setHttpsServiceAcl(parentStoRI.getLocalFile(), FilesystemPermission.Traverse); - } - } - - private void setHttpsServiceAcl(LocalFile file, FilesystemPermission permission) { - - log.debug("SrmMkdir: Adding https ACL {} for directory : '{}'", permission, file); - - try { - AclManagerFS.getInstance().grantHttpsServiceGroupPermission(file, permission); - } catch (IllegalArgumentException e) { - log.error("Unable to grant user permission on the created folder. " - + "IllegalArgumentException: {}", e.getMessage(), e); - requestData.getStatus() - .extendExplaination("Unable to grant group permission on the created folder"); - } - } - /** * Private method used to manage ReserveSpace. Returns false if something went wrong! */ @@ -711,7 +666,7 @@ private boolean managePermitReserveSpaceStep(StoRI fileStoRI) { // the Storage Area free size is retrieved from the database // and the PtP fails if there is not enougth space. 
- VirtualFSInterface fs = fileStoRI.getVirtualFileSystem(); + VirtualFS fs = fileStoRI.getVirtualFileSystem(); if (fs != null && fs.getProperties().isOnlineSpaceLimited()) { SpaceHelper sp = new SpaceHelper(); @@ -733,7 +688,7 @@ private boolean managePermitReserveSpaceStep(StoRI fileStoRI) { return false; } else { if (!sp.isSAInitialized(PtP.log, fileStoRI) - && Configuration.getInstance().getDiskUsageServiceEnabled()) { + && Configuration.getInstance().isDiskUsageServiceEnabled()) { /* Trust we got space, let the request pass */ log.debug("PtPChunk: ReserveSpaceStep: the storage area space " + "initialization is in progress, optimistic approach, considering " @@ -868,7 +823,7 @@ private boolean isExistingSpaceToken(TSpaceToken spaceToken) throws Exception { StorageSpaceData spaceData = null; try { - spaceData = new ReservedSpaceCatalog().getStorageSpace(spaceToken); + spaceData = ReservedSpaceCatalog.getInstance().getStorageSpace(spaceToken); } catch (TransferObjectDecodingException e) { log.error("Unable to build StorageSpaceData from StorageSpaceTO." 
+ " TransferObjectDecodingException: {}", e.getMessage()); diff --git a/src/main/java/it/grid/storm/asynch/PtPBuilder.java b/src/main/java/it/grid/storm/asynch/PtPBuilder.java index 38150ea60..a04317c88 100644 --- a/src/main/java/it/grid/storm/asynch/PtPBuilder.java +++ b/src/main/java/it/grid/storm/asynch/PtPBuilder.java @@ -19,14 +19,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import it.grid.storm.catalogs.AnonymousPtPData; -import it.grid.storm.catalogs.IdentityPtPData; -import it.grid.storm.catalogs.InvalidFileTransferDataAttributesException; -import it.grid.storm.catalogs.InvalidPtPDataAttributesException; -import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException; -import it.grid.storm.catalogs.PtPData; + import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.config.Configuration; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.model.AnonymousPtPData; +import it.grid.storm.persistence.model.IdentityPtPData; +import it.grid.storm.persistence.model.PtPData; import it.grid.storm.srm.types.TFileStorageType; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TOverwriteMode; diff --git a/src/main/java/it/grid/storm/asynch/PtPFeeder.java b/src/main/java/it/grid/storm/asynch/PtPFeeder.java index 65486ac20..c1df96972 100644 --- a/src/main/java/it/grid/storm/asynch/PtPFeeder.java +++ b/src/main/java/it/grid/storm/asynch/PtPFeeder.java @@ -18,10 +18,10 @@ package it.grid.storm.asynch; import it.grid.storm.catalogs.PtPChunkCatalog; -import it.grid.storm.catalogs.PtPPersistentChunkData; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import 
it.grid.storm.persistence.model.PtPPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.SchedulerException; import it.grid.storm.srm.types.TSURL; diff --git a/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java b/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java index 65f830d32..686649ec5 100644 --- a/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java +++ b/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java @@ -13,9 +13,9 @@ import java.util.Arrays; import it.grid.storm.catalogs.PtPChunkCatalog; -import it.grid.storm.catalogs.PtPData; -import it.grid.storm.catalogs.PtPPersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; +import it.grid.storm.persistence.model.PtPData; +import it.grid.storm.persistence.model.PtPPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.PersistentRequestChunk; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.CommandHelper; diff --git a/src/main/java/it/grid/storm/asynch/SRMPrepareToPutReply.java b/src/main/java/it/grid/storm/asynch/SRMPrepareToPutReply.java deleted file mode 100644 index 7d036c706..000000000 --- a/src/main/java/it/grid/storm/asynch/SRMPrepareToPutReply.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.asynch; - -import it.grid.storm.srm.types.TRequestToken; - -/** - * Class that represents a reply to an issued SRMPrepareToPut command. It provides a method to - * recover the assigned request token. - * - * @author EGRID - ICTP Trieste - * @version 1.0 - * @date September, 2005 - */ -public class SRMPrepareToPutReply { - - // TRequestToken assigned during the SRM prepare to put operation - private TRequestToken requestToken = null; - - /** - * Constructor that requires the assigned TRequestToken; if it is null, an - * InvalidPutReplyAttributeException is thrown. - */ - public SRMPrepareToPutReply(TRequestToken requestToken) throws InvalidPutReplyAttributeException { - - if (requestToken == null) - throw new InvalidPutReplyAttributeException(); - this.requestToken = requestToken; - } - - /** - * Method that returns the assigned request token. - */ - public TRequestToken requestToken() { - - return requestToken; - } - - public String toString() { - - return "requestToken=" + requestToken; - } -} diff --git a/src/main/java/it/grid/storm/asynch/SRMPutDoneReply.java b/src/main/java/it/grid/storm/asynch/SRMPutDoneReply.java deleted file mode 100644 index 215356c2b..000000000 --- a/src/main/java/it/grid/storm/asynch/SRMPutDoneReply.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.asynch; - -import it.grid.storm.srm.types.TReturnStatus; - -/** - * Class that represents the reply received from issuing an srmPutDone command. - * - * @author EGRID ICTP Trieste - * @version 1.0 - * @date August 2006 - */ -public class SRMPutDoneReply { - - private TReturnStatus overallRetStat = null; // overall request return status - - /** - * Constructor that requires the overall TReturnStatus of the reply. - */ - public SRMPutDoneReply(TReturnStatus overallRetStat) - throws InvalidPutDoneReplyAttributeException { - - if (overallRetStat == null) - throw new InvalidPutDoneReplyAttributeException(); - this.overallRetStat = overallRetStat; - } - - /** - * Method that returns the overll status of the request. - */ - public TReturnStatus overallRetStat() { - - return overallRetStat; - } - - public String toString() { - - return "SRMPutDoneReply: overall TReturnStatus is " + overallRetStat.toString(); - } -} diff --git a/src/main/java/it/grid/storm/asynch/SRMStatusOfPutRequestReply.java b/src/main/java/it/grid/storm/asynch/SRMStatusOfPutRequestReply.java deleted file mode 100644 index f304c6d81..000000000 --- a/src/main/java/it/grid/storm/asynch/SRMStatusOfPutRequestReply.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.asynch; - -import it.grid.storm.srm.types.TTURL; -import it.grid.storm.srm.types.TReturnStatus; - -/** - * Class that represents the reply returned from an invocation of SRMStatusOfPutRequest. It supplies - * methods for quering the toTURL assigned, and the returnStatus of the request. - * - * @author EGRID - ICTP Trieste - * @version 1.0 - * @date September 2005 - */ -public class SRMStatusOfPutRequestReply { - - private TTURL toTURL = null; // TTURL as supplied by the invoked server in the - // SRMStatusOfPutRequest - private TReturnStatus returnStatus = null; // returnStatus as supplied by the - // invoked server in the - // SRMStatusOfPutRequest - - public SRMStatusOfPutRequestReply(TTURL toTURL, TReturnStatus returnStatus) - throws InvalidPutStatusAttributesException { - - if ((toTURL == null) || (returnStatus == null)) - throw new InvalidPutStatusAttributesException(toTURL, returnStatus); - this.toTURL = toTURL; - this.returnStatus = returnStatus; - } - - /** - * Method that returns the toTURL that the invoked server assigned to the put request. - */ - public TTURL toTURL() { - - return toTURL; - } - - /** - * Method that returns the TReturnStatus that the invoked server assigned to the put request. 
- */ - public TReturnStatus returnStatus() { - - return returnStatus; - } - - public String toString() { - - return "toTURL= " + toTURL + "; returnStatus=" + returnStatus; - } -} diff --git a/src/main/java/it/grid/storm/asynch/Suspendedable.java b/src/main/java/it/grid/storm/asynch/Suspendedable.java index cfd887e00..970996c54 100644 --- a/src/main/java/it/grid/storm/asynch/Suspendedable.java +++ b/src/main/java/it/grid/storm/asynch/Suspendedable.java @@ -17,7 +17,7 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestData; +import it.grid.storm.persistence.model.RequestData; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; public interface Suspendedable { diff --git a/src/main/java/it/grid/storm/authz/AuthzDirector.java b/src/main/java/it/grid/storm/authz/AuthzDirector.java index 154001521..4e65b95f5 100644 --- a/src/main/java/it/grid/storm/authz/AuthzDirector.java +++ b/src/main/java/it/grid/storm/authz/AuthzDirector.java @@ -17,159 +17,44 @@ package it.grid.storm.authz; -import it.grid.storm.authz.path.PathAuthz; -import it.grid.storm.authz.path.conf.PathAuthzDBReader; -import it.grid.storm.authz.sa.SpaceDBAuthz; -import it.grid.storm.authz.sa.test.MockSpaceAuthz; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; -import it.grid.storm.namespace.VirtualFSInterface; -import it.grid.storm.namespace.model.SAAuthzType; -import it.grid.storm.srm.types.TSpaceToken; - -import java.io.File; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Map; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class AuthzDirector { - - private static final Logger log = LoggerFactory - .getLogger(AuthzDirector.class); - private static String configurationPATH; - - // Map between 'SpaceToken' and the related 'SpaceAuthz' - private static Map spaceAuthzs = null; - - // PathAuthz is only one, shared by all SAs - 
private static PathAuthzInterface pathAuthz = null; - - /** - * Scan the Namespace.xml to retrieve the list of file AuthZDB to digest - */ - private static Map buildSpaceAuthzsMAP() { - - HashMap spaceAuthzMap = new HashMap(); - - // Retrieve the list of VFS from Namespace - NamespaceInterface ns = NamespaceDirector.getNamespace(); - ArrayList vfss; - try { - vfss = new ArrayList(ns.getAllDefinedVFS()); - for (VirtualFSInterface vfs : vfss) { - String vfsName = vfs.getAliasName(); - SAAuthzType authzTp = vfs.getStorageAreaAuthzType(); - String authzName = ""; - if (authzTp.equals(SAAuthzType.AUTHZDB)) { - // The Space Authz is based on Authz DB - authzName = vfs.getStorageAreaAuthzDB(); - log.debug("Loading AuthzDB '{}'", authzName); - if (existsAuthzDBFile(authzName)) { - // Digest the Space AuthzDB File - TSpaceToken spaceToken = vfs.getSpaceToken(); - SpaceAuthzInterface spaceAuthz = new SpaceDBAuthz(authzName); - spaceAuthzMap.put(spaceToken, spaceAuthz); - } else { - log.error("File AuthzDB '{}' related to '{}' does not exists.", - authzName, vfsName); - } - } else { - authzName = vfs.getStorageAreaAuthzFixed(); - } - log.debug("VFS ['{}'] = {} : {}", vfsName, authzTp, authzName); - } - } catch (NamespaceException e) { - log.error("Unable to initialize AUTHZ DB! 
Error: {}", e.getMessage(), e); - } - - return spaceAuthzMap; - } - - /** - * Utility method - * - * @param dbFileName - * @return - * @throws AuthzDBReaderException - */ - private static boolean existsAuthzDBFile(String dbFileName) { - - String fileName = configurationPATH + File.separator + dbFileName; - boolean exists = (new File(fileName)).exists(); - if (!exists) { - log.warn("The AuthzDB File '{}' does not exists", dbFileName); - } - return exists; - } - - // **************************************** - // PUBLIC METHODS - // **************************************** - - /****************************** - * SPACE AUTHORIZATION ENGINE - ******************************/ - public static void initializeSpaceAuthz() { - - // Build Space Authzs MAP - spaceAuthzs = buildSpaceAuthzsMAP(); - } - - /** - * Retrieve the Space Authorization module related to the Space Token - * - * @param token - * @return - */ - public static SpaceAuthzInterface getSpaceAuthz(TSpaceToken token) { - - SpaceAuthzInterface spaceAuthz = new MockSpaceAuthz(); - // Retrieve the SpaceAuthz related to the Space Token - if ((spaceAuthzs != null) && (spaceAuthzs.containsKey(token))) { - spaceAuthz = spaceAuthzs.get(token); - log.debug("Space Authz related to S.Token ='{}' is '{}'", token, - spaceAuthz.getSpaceAuthzID()); - } else { - log.debug("Space Authz related to S.Token ='{}' does not exists. 
" - + "Use the MOCK one.", token); - } - return spaceAuthz; - } - - /****************************** - * PATH AUTHORIZATION ENGINE - ******************************/ - - /** - * Initializating the Path Authorization engine - * - * @param pathAuthz2 - */ - public static void initializePathAuthz(String pathAuthzDBFileName) - throws DirectorException { - - PathAuthzDBReader authzDBReader; - try { - authzDBReader = new PathAuthzDBReader(pathAuthzDBFileName); - } catch (Exception e) { - log.error("Unable to build a PathAuthzDBReader: {}", e.getMessage(), e); - throw new DirectorException("Unable to build a PathAuthzDBReader"); - } - AuthzDirector.pathAuthz = new PathAuthz(authzDBReader.getPathAuthzDB()); - } +import it.grid.storm.authz.path.PathAuthz; +import it.grid.storm.authz.path.conf.PathAuthzDBReader; - /** - * Retrieve the Path Authorization module - * - * @todo: To implement this. - */ - public static PathAuthzInterface getPathAuthz() { +public class AuthzDirector { - return AuthzDirector.pathAuthz; - } + private static final Logger log = LoggerFactory.getLogger(AuthzDirector.class); + + // PathAuthz is only one, shared by all SAs + private static PathAuthzInterface pathAuthz = null; + + /** + * Initialize the Path Authorization engine + * + * @param pathAuthz2 + */ + public static void initializePathAuthz(String pathAuthzDBFileName) throws DirectorException { + + PathAuthzDBReader authzDBReader; + try { + authzDBReader = new PathAuthzDBReader(pathAuthzDBFileName); + } catch (Exception e) { + log.error("Unable to build a PathAuthzDBReader: {}", e.getMessage(), e); + throw new DirectorException("Unable to build a PathAuthzDBReader"); + } + AuthzDirector.pathAuthz = new PathAuthz(authzDBReader.getPathAuthzDB()); + } + + /** + * Retrieve the Path Authorization module + * + * @todo: To implement this. 
+ */ + public static PathAuthzInterface getPathAuthz() { + + return AuthzDirector.pathAuthz; + } } diff --git a/src/main/java/it/grid/storm/authz/AuthzException.java b/src/main/java/it/grid/storm/authz/AuthzException.java index 945f41e4c..69d2e848a 100644 --- a/src/main/java/it/grid/storm/authz/AuthzException.java +++ b/src/main/java/it/grid/storm/authz/AuthzException.java @@ -26,28 +26,18 @@ */ public class AuthzException extends RuntimeException { - /** - * - */ - private static final long serialVersionUID = 1L; + /** + * + */ + private static final long serialVersionUID = 1L; - public AuthzException() { + public AuthzException() { - super(); - } + super(); + } - public AuthzException(String message) { + public AuthzException(String message) { - super(message); - } - - public AuthzException(String message, Throwable cause) { - - super(message, cause); - } - - public AuthzException(Throwable cause) { - - super(cause); - } + super(message); + } } diff --git a/src/main/java/it/grid/storm/authz/DirectorException.java b/src/main/java/it/grid/storm/authz/DirectorException.java index 7e7382651..a84d37099 100644 --- a/src/main/java/it/grid/storm/authz/DirectorException.java +++ b/src/main/java/it/grid/storm/authz/DirectorException.java @@ -2,28 +2,18 @@ public class DirectorException extends Exception { - /** - * - */ - private static final long serialVersionUID = 8391356294029256927L; + /** + * + */ + private static final long serialVersionUID = 8391356294029256927L; - public DirectorException() { + public DirectorException() { - } + } - public DirectorException(String message) { + public DirectorException(String message) { - super(message); - } - - public DirectorException(Throwable cause) { - - super(cause); - } - - public DirectorException(String message, Throwable cause) { - - super(message, cause); - } + super(message); + } } diff --git a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDB.java b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDB.java 
index 917c8875d..ecbc9e84b 100644 --- a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDB.java +++ b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDB.java @@ -46,13 +46,6 @@ public PathAuthzDB(String pathAuthzDBID, PathAuthzEvaluationAlgorithm algorithm, this.authzDB.addAll(aces); } - public PathAuthzDB(String pathAuthzDBID, List aces) { - - this.pathAuthzDBID = pathAuthzDBID; - this.evaluationAlg = DEFAULT_ALGORITHM; - this.authzDB.addAll(aces); - } - /** * Empty constructor. Use it only if there is not */ @@ -63,11 +56,6 @@ public PathAuthzDB() { this.authzDB.add(PathACE.PERMIT_ALL); } - public void addPathACE(PathACE pathAce) { - - authzDB.add(pathAce); - } - public List getACL() { return authzDB; diff --git a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java index 7b9b0aebf..ad2f5da1c 100644 --- a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java +++ b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java @@ -20,23 +20,22 @@ */ package it.grid.storm.authz.path.conf; -import it.grid.storm.authz.AuthzException; -import it.grid.storm.authz.path.model.PathACE; -import it.grid.storm.authz.path.model.PathAuthzEvaluationAlgorithm; -import it.grid.storm.config.Configuration; - import java.io.BufferedReader; import java.io.File; +import java.io.FileNotFoundException; import java.io.FileReader; import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.LinkedList; -import java.io.FileNotFoundException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import it.grid.storm.authz.AuthzException; +import it.grid.storm.authz.path.model.PathACE; +import it.grid.storm.authz.path.model.PathAuthzEvaluationAlgorithm; + /** * @author zappi */ @@ -44,7 +43,6 @@ public class PathAuthzDBReader { private static final Logger log = LoggerFactory.getLogger(PathAuthzDBReader.class); - 
private final String authzDBFilename; private PathAuthzDB pathAuthzDB; private static enum LineType { @@ -53,31 +51,9 @@ private static enum LineType { public PathAuthzDBReader(String filename) throws Exception { - log.info("Path Authorization : Initializing..."); - if (!(existsAuthzDBFile(filename))) { - String configurationPATH = Configuration.getInstance().namespaceConfigPath(); - if (configurationPATH.length() == 0) { - String userDir = System.getProperty("user.dir"); - log.debug("Unable to found the configuration path. Assume: '{}'", userDir); - configurationPATH = userDir + File.separator + "etc"; - } - authzDBFilename = configurationPATH + File.separator + filename; - } else { - authzDBFilename = filename; - } - log.debug("Loading Path Authz DB : '{}'", authzDBFilename); - pathAuthzDB = loadPathAuthzDB(authzDBFilename); + log.debug("Loading Path Authz DB : '{}'", filename); + pathAuthzDB = loadPathAuthzDB(filename); log.info("Path Authz DB ('{}') loaded.", pathAuthzDB.getPathAuthzDBID()); - log.info(pathAuthzDB.toString()); - } - - public void refreshPathAuthzDB() throws Exception { - - log.debug(" Start refreshing."); - pathAuthzDB = loadPathAuthzDB(authzDBFilename); - log.debug(" End refreshing."); - log.info("Path Authz DB ('{}') RE-loaded.", pathAuthzDB.getPathAuthzDBID()); - log.info(pathAuthzDB.toString()); } public PathAuthzDB getPathAuthzDB() { diff --git a/src/main/java/it/grid/storm/authz/path/model/PathACE.java b/src/main/java/it/grid/storm/authz/path/model/PathACE.java index 4c98713ad..656bad1f6 100644 --- a/src/main/java/it/grid/storm/authz/path/model/PathACE.java +++ b/src/main/java/it/grid/storm/authz/path/model/PathACE.java @@ -59,18 +59,6 @@ public class PathACE { // =========== CONSTRUCTORs ============ - /** - * Quite similar to clone - * - * @throws AuthzException - */ - public static PathACE build(PathACE other) throws AuthzException { - - PathACE result = new PathACE(other.localGroupName, other.getStorageFileName(), - 
other.getPathAccessMask(), other.isPermitAce()); - return result; - } - private static PathACE buildPermitAllPathACE() throws IllegalStateException { try { diff --git a/src/main/java/it/grid/storm/authz/path/model/PathAuthzAlgBestMatch.java b/src/main/java/it/grid/storm/authz/path/model/PathAuthzAlgBestMatch.java index 84e791fd5..9cc821191 100644 --- a/src/main/java/it/grid/storm/authz/path/model/PathAuthzAlgBestMatch.java +++ b/src/main/java/it/grid/storm/authz/path/model/PathAuthzAlgBestMatch.java @@ -35,6 +35,8 @@ */ public class PathAuthzAlgBestMatch extends PathAuthzEvaluationAlgorithm { + private static PathAuthzAlgBestMatch instance; + public static PathAuthzEvaluationAlgorithm getInstance() { if (instance == null) { diff --git a/src/main/java/it/grid/storm/authz/path/model/PathAuthzEvaluationAlgorithm.java b/src/main/java/it/grid/storm/authz/path/model/PathAuthzEvaluationAlgorithm.java index 4abf781fb..95a1d1bac 100644 --- a/src/main/java/it/grid/storm/authz/path/model/PathAuthzEvaluationAlgorithm.java +++ b/src/main/java/it/grid/storm/authz/path/model/PathAuthzEvaluationAlgorithm.java @@ -28,17 +28,6 @@ */ public abstract class PathAuthzEvaluationAlgorithm { - public static PathAuthzEvaluationAlgorithm instance = null; - - public static PathAuthzEvaluationAlgorithm getInstance() throws Exception { - - if (instance == null) { - throw new Exception( - "Unable to provide the instance, my comcrete subclass as not provided any"); - } - return instance; - } - public abstract AuthzDecision evaluate(String subject, StFN fileName, SRMFileRequest pathOperation, List acl); diff --git a/src/main/java/it/grid/storm/authz/path/model/PathOperation.java b/src/main/java/it/grid/storm/authz/path/model/PathOperation.java index cf4cd53f8..245ad44b1 100644 --- a/src/main/java/it/grid/storm/authz/path/model/PathOperation.java +++ b/src/main/java/it/grid/storm/authz/path/model/PathOperation.java @@ -25,12 +25,15 @@ */ public enum PathOperation { - WRITE_FILE('W', "WRITE_FILE", 
"Write data"), READ_FILE('R', "READ_FILE", "Read data", - true), RENAME('F', "RENAME", "Rename a file or a directory"), DELETE('D', "DELETE", - "Delete a file or a directory"), LIST_DIRECTORY('L', "LIST_DIRECTORY", - "Listing a directory", - true), MAKE_DIRECTORY('M', "CREATE_DIRECTORY", "Create a directory"), CREATE_FILE('N', - "CREATE_FILE", "Create a new file"), UNDEFINED('?', "UNDEFINED", "Undefined"); + + WRITE_FILE('W', "WRITE_FILE", "Write data"), + READ_FILE('R', "READ_FILE", "Read data", true), + RENAME('F', "RENAME", "Rename a file or a directory"), + DELETE('D', "DELETE", "Delete a file or a directory"), + LIST_DIRECTORY('L', "LIST_DIRECTORY", "Listing a directory", true), + MAKE_DIRECTORY('M', "CREATE_DIRECTORY", "Create a directory"), + CREATE_FILE('N', "CREATE_FILE", "Create a new file"), + UNDEFINED('?', "UNDEFINED", "Undefined"); private final char operation; private final String operationName; @@ -69,8 +72,6 @@ public static PathOperation getSpaceOperation(char op) { return RENAME; case 'D': return DELETE; - // case 'T': - // return TRAVERSE_DIRECTORY; case 'L': return LIST_DIRECTORY; case 'M': @@ -93,16 +94,6 @@ public char getSpaceOperationValue() { return operation; } - public PathOperation getSpaceOp(int ordinal) { - - PathOperation[] sp = PathOperation.values(); - if ((ordinal >= 0) && (ordinal < sp.length)) { - return sp[ordinal]; - } else { - return UNDEFINED; - } - } - public int getNumberOfPathOp() { return PathOperation.values().length - 1; diff --git a/src/main/java/it/grid/storm/authz/path/model/PathPrincipal.java b/src/main/java/it/grid/storm/authz/path/model/PathPrincipal.java deleted file mode 100644 index 3ade90820..000000000 --- a/src/main/java/it/grid/storm/authz/path/model/PathPrincipal.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * - */ -package it.grid.storm.authz.path.model; - -/** - * @author zappi - * - */ -public class PathPrincipal { - - public static final String prefix = "@"; - - private String localGroupName; - private boolean principalCategory = false; - - public PathPrincipal(String principal) { - - principalCategory = principal.startsWith(prefix); - localGroupName = principal; - } - - public boolean isLocalGroup() { - - return !principalCategory; - } - - public String getLocalGroupName() { - - return localGroupName; - } - - public boolean equals(Object o) { - - if (o instanceof PathPrincipal) { - PathPrincipal op = (PathPrincipal) o; - if (op.isLocalGroup() && (isLocalGroup())) { - return (op.getLocalGroupName().equals(getLocalGroupName())); - } - } - return false; - } - - @Override - public int hashCode() { - - int result = 17; - result = 31 * result + (localGroupName != null ? localGroupName.hashCode() : 0); - result = 31 * result + (principalCategory ? 
1 : 0); - return result; - } - -} diff --git a/src/main/java/it/grid/storm/authz/path/model/SRMFileRequest.java b/src/main/java/it/grid/storm/authz/path/model/SRMFileRequest.java index 7f49e3c69..e3e06c989 100644 --- a/src/main/java/it/grid/storm/authz/path/model/SRMFileRequest.java +++ b/src/main/java/it/grid/storm/authz/path/model/SRMFileRequest.java @@ -21,7 +21,6 @@ package it.grid.storm.authz.path.model; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; /** @@ -161,55 +160,6 @@ public enum SRMFileRequest { private final String srmOp; private final PathAccessMask requestedPathOps; - private static HashMap ops = new HashMap() { - - /** - * - */ - private static final long serialVersionUID = 1L; - - { - put("PTP-Over", PTP_Overwrite); - put("srmPrepareToPut-overwrite", PTP_Overwrite); - put("PTP", PTP); - put("srmPrepareToPut", PTP); - put("PTG", PTG); - put("srmPrepareToGet", PTG); - put("CPto_Over", CPto_Overwrite); - put("srmCopy to-overwrite", CPto_Overwrite); - put("CPto", CPto); - put("srmCopy to", CPto); - put("CPFrom", CPfrom); - put("srmCopy from", CPfrom); - put("RM", RM); - put("srmRm", RM); - put("RMD", RMD); - put("srmRemoveDir", RM); - put("MD", MD); - put("srmMakeDir", MD); - put("LS", LS); - put("srmLs", LS); - put("MV-source", MV_source); - put("srmMove-source", MV_source); - put("MV-dest-Over", MV_dest_Overwrite); - put("srmMove-dest-overwrite", MV_dest_Overwrite); - put("MV-dest", MV_dest); - put("srmMove-dest", MV_dest); - } - }; - - /* - * Used only for testing - */ - public static SRMFileRequest buildFromString(String srmOp) { - - if (ops.containsKey(srmOp)) { - return ops.get(srmOp); - } else { - return null; - } - } - /** * SRMOperation */ diff --git a/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java b/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java index 3139f619b..584901e5a 100644 --- 
a/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java +++ b/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java @@ -35,18 +35,18 @@ import it.grid.storm.authz.path.model.PathOperation; import it.grid.storm.authz.path.model.SRMFileRequest; import it.grid.storm.authz.remote.Constants; -import it.grid.storm.catalogs.OverwriteModeConverter; import it.grid.storm.common.types.InvalidStFNAttributeException; import it.grid.storm.common.types.StFN; import it.grid.storm.config.Configuration; import it.grid.storm.griduser.FQAN; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.model.MappingRule; import it.grid.storm.namespace.model.Protocol; +import it.grid.storm.namespace.model.VirtualFS; +import it.grid.storm.persistence.converter.OverwriteModeConverter; import it.grid.storm.srm.types.TOverwriteMode; class PermissionEvaluator { @@ -55,8 +55,8 @@ class PermissionEvaluator { public static Boolean isOverwriteAllowed() { - return OverwriteModeConverter.getInstance() - .toSTORM(Configuration.getInstance().getDefaultOverwriteMode()) + return OverwriteModeConverter + .toSTORM(Configuration.getInstance().getDefaultOverwriteMode().toString()) .equals(TOverwriteMode.ALWAYS); } @@ -66,9 +66,9 @@ static Boolean evaluateVomsGridUserPermission(String DNDecoded, String FQANSDeco String[] FQANSArray = parseFQANS(FQANSDecoded); GridUserInterface gu = buildGridUser(DNDecoded, FQANSArray); - VirtualFSInterface fileVFS; + VirtualFS fileVFS; try { - fileVFS = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath(filePathDecoded); + fileVFS = Namespace.getInstance().resolveVFSbyAbsolutePath(filePathDecoded); } catch (NamespaceException e) { log.error("Unable to determine a VFS 
that maps the requested file " + "path '{}'. NamespaceException: {}", filePathDecoded, e.getMessage()); @@ -108,9 +108,9 @@ static Boolean evaluateVomsGridUserPermission(String DNDecoded, String FQANSDeco String[] FQANSArray = parseFQANS(FQANSDecoded); GridUserInterface gu = buildGridUser(DNDecoded, FQANSArray); - VirtualFSInterface fileVFS; + VirtualFS fileVFS; try { - fileVFS = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath(filePathDecoded); + fileVFS = Namespace.getInstance().resolveVFSbyAbsolutePath(filePathDecoded); } catch (NamespaceException e) { log.error("Unable to determine a VFS that maps the requested file " + "path '{}'. NamespaceException: {}", filePathDecoded, e.getMessage()); @@ -132,9 +132,9 @@ static Boolean evaluateVomsGridUserPermission(String DNDecoded, String FQANSDeco static Boolean evaluateAnonymousPermission(String filePathDecoded, PathOperation request) { - VirtualFSInterface fileVFS; + VirtualFS fileVFS; try { - fileVFS = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath(filePathDecoded); + fileVFS = Namespace.getInstance().resolveVFSbyAbsolutePath(filePathDecoded); } catch (NamespaceException e) { log.error("Unable to determine a VFS that maps the requested file " + "path '{}'. NamespaceException: {}", filePathDecoded, e.getMessage()); @@ -154,9 +154,9 @@ static Boolean evaluateAnonymousPermission(String filePathDecoded, PathOperation static Boolean evaluateAnonymousPermission(String filePathDecoded, SRMFileRequest request) { - VirtualFSInterface fileVFS; + VirtualFS fileVFS; try { - fileVFS = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath(filePathDecoded); + fileVFS = Namespace.getInstance().resolveVFSbyAbsolutePath(filePathDecoded); } catch (NamespaceException e) { log.error("Unable to determine a VFS that maps the requested file " + "path '{}'. 
NamespaceException: {}", filePathDecoded, e.getMessage()); @@ -200,7 +200,7 @@ private static Boolean evaluateDecision(AuthzDecision decision) { } } - private static StFN buildStFN(String filePathDecoded, VirtualFSInterface fileVFS) + private static StFN buildStFN(String filePathDecoded, VirtualFS fileVFS) throws WebApplicationException { String VFSRootPath; diff --git a/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderException.java b/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderException.java index 1a3522b5f..11107991b 100644 --- a/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderException.java +++ b/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderException.java @@ -33,14 +33,4 @@ public AuthzDBReaderException(String message) { super(message); } - - public AuthzDBReaderException(String message, Throwable cause) { - - super(message, cause); - } - - public AuthzDBReaderException(Throwable cause) { - - super(cause); - } } diff --git a/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderInterface.java b/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderInterface.java deleted file mode 100644 index 222cedb7a..000000000 --- a/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderInterface.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.authz.sa; - -import java.util.List; - -public interface AuthzDBReaderInterface { - - public void addAuthzDB(String dbFileName) throws AuthzDBReaderException; - - public List getAuthzDBNames(); - - public void onChangeAuthzDB(String authzDBName) throws AuthzDBReaderException; - - public AuthzDBInterface getAuthzDB(String authzDBName) - throws AuthzDBReaderException; - - public long getLastParsed(String dbFileName) throws AuthzDBReaderException; - -} diff --git a/src/main/java/it/grid/storm/authz/sa/SpaceAuthz.java b/src/main/java/it/grid/storm/authz/sa/SpaceAuthz.java deleted file mode 100644 index 8cec4103e..000000000 --- a/src/main/java/it/grid/storm/authz/sa/SpaceAuthz.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.authz.sa; - -import it.grid.storm.authz.SpaceAuthzInterface; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.griduser.GridUserInterface; - -public abstract class SpaceAuthz implements SpaceAuthzInterface { - - private AuthzDBInterface authzDB; - - /** - * @todo: 1) IMPLEMENT AUHTZ ENGINE - * @todo: 2) IMPLEMENT CACHE - * @todo: 3) IMPLEMENT PRINCIPAL LIST PERSISTENCE - * @todo: 4) IMPLEMENT RECALCULATE CACHE - */ - - public SpaceAuthz() { - - super(); - } - - public abstract boolean authorize(GridUserInterface guser, SRMSpaceRequest srmSpaceOp); - - public void setAuthzDB(AuthzDBInterface authzDB) { - - this.authzDB = authzDB; - } - - public AuthzDBInterface getAuthzDB() { - - return authzDB; - } - -} diff --git a/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java b/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java deleted file mode 100644 index 8c891db0b..000000000 --- a/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -/** - * - */ -package it.grid.storm.authz.sa; - -import java.io.File; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.config.Configuration; -import it.grid.storm.griduser.GridUserInterface; - -/** - * @author zappi - */ -public class SpaceDBAuthz extends SpaceAuthz { - - private static final Logger log = LoggerFactory.getLogger(SpaceDBAuthz.class); - - public static final String UNDEF = "undef-SpaceAuthzDB"; - - private String spaceAuthzDBID = "not-defined"; - private static String configurationPATH; - private String dbFileName; - - public SpaceDBAuthz() { - - } - - /** - * @return - */ - public static SpaceDBAuthz makeEmpty() { - - SpaceDBAuthz result = new SpaceDBAuthz(); - result.setSpaceAuthzDBID("default-SpaceAuthzDB"); - return result; - } - - public SpaceDBAuthz(String dbFileName) { - - Configuration config = Configuration.getInstance(); - configurationPATH = config.namespaceConfigPath(); - if (existsAuthzDBFile(dbFileName)) { - this.dbFileName = dbFileName; - spaceAuthzDBID = dbFileName; - } - } - - /** - * @param string - */ - void setSpaceAuthzDBID(String id) { - - spaceAuthzDBID = id; - } - - /** - * - */ - @Override - public boolean authorize(GridUserInterface guser, SRMSpaceRequest srmSpaceOp) { - - return false; - } - - @Override - public boolean authorizeAnonymous(SRMSpaceRequest srmSpaceOp) { - - return false; - } - - - /********************************************************************** - * BUILDINGs METHODS - */ - - /** - * Check the existence of the AuthzDB file - */ - private boolean existsAuthzDBFile(String dbFileName) { - - String fileName = configurationPATH + File.separator + dbFileName; - boolean exists = (new File(fileName)).exists(); - if (!(exists)) { - log.error("The AuthzDB File '{}' does not exists", dbFileName); - } - return exists; - } - - /** - * Return the AuthzDB FileName - * - * @return - */ - String getAuthzDBFileName() { - - 
return dbFileName; - } - - public String getSpaceAuthzID() { - - return spaceAuthzDBID; - } - - /** - * - */ - public void refresh() { - - // empty - } - -} diff --git a/src/main/java/it/grid/storm/authz/sa/SpaceFixedAuthz.java b/src/main/java/it/grid/storm/authz/sa/SpaceFixedAuthz.java deleted file mode 100644 index 3ccec5bb4..000000000 --- a/src/main/java/it/grid/storm/authz/sa/SpaceFixedAuthz.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * - */ -package it.grid.storm.authz.sa; - -import it.grid.storm.authz.sa.model.AuthzDBFixed; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.griduser.GridUserInterface; - -/** - * @author zappi - */ -public class SpaceFixedAuthz extends SpaceAuthz { - - private static final String FIXED_ID = "fixed-space-authz"; - - public SpaceFixedAuthz(AuthzDBFixed fixedAuthzDB) - throws AuthzDBReaderException { - - } - - @Override - public boolean authorize(GridUserInterface guser, SRMSpaceRequest srmSpaceOp) { - - // @todo : implement the simple algorithm. 
- return true; - } - - @Override - public boolean authorizeAnonymous(SRMSpaceRequest srmSpaceOp) { - - // TODO Auto-generated method stub - return true; - } - - public String getSpaceAuthzID() { - - return FIXED_ID; - } - - public void refresh() { - - } - -} diff --git a/src/main/java/it/grid/storm/authz/sa/model/AuthzDBFixed.java b/src/main/java/it/grid/storm/authz/sa/model/AuthzDBFixed.java deleted file mode 100644 index 99298d91b..000000000 --- a/src/main/java/it/grid/storm/authz/sa/model/AuthzDBFixed.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -/** - * - */ -package it.grid.storm.authz.sa.model; - -import it.grid.storm.authz.sa.AuthzDBInterface; -import it.grid.storm.namespace.model.SAAuthzType; - -import java.util.List; - -/** - * @author zappi - * - */ -public abstract class AuthzDBFixed implements AuthzDBInterface { - - /* - * (non-Javadoc) - * - * @see it.grid.storm.authz.sa.AuthzDBInterface#getAuthzDBType() - */ - public SAAuthzType getAuthzDBType() { - - return SAAuthzType.FIXED; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.authz.sa.AuthzDBInterface#getOrderedListOfACE() - */ - public abstract List getOrderedListOfACE(); - -} diff --git a/src/main/java/it/grid/storm/authz/sa/model/DNEveryonePattern.java b/src/main/java/it/grid/storm/authz/sa/model/DNEveryonePattern.java deleted file mode 100644 index 6a1ec5aa3..000000000 --- a/src/main/java/it/grid/storm/authz/sa/model/DNEveryonePattern.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -/** - * - */ -package it.grid.storm.authz.sa.model; - -import it.grid.storm.authz.sa.AuthzDBReaderException; -import it.grid.storm.griduser.DistinguishedName; -import it.grid.storm.griduser.SubjectAttribute; - -/** - * @author zappi - * - */ -public class DNEveryonePattern extends DNPattern implements Everyone { - - /** - * CONSTRUCTOR - */ - - public DNEveryonePattern() throws AuthzDBReaderException { - - super("*"); - this.checkValidity = false; - init("*", "*", "*", "*", "*", "*"); - } - - /* - * Return always true because the pattern is built programmatically, and it is supposed to be - * valid. - * - * @see it.grid.storm.authz.sa.model.SubjectPattern#isValidPattern() - */ - @Override - public boolean isValidPattern() throws AuthzDBReaderException { - - return true; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.authz.sa.model.SubjectPattern#match(it.grid.storm.griduser - * .SubjectAttribute) - */ - // @Override - @Override - public boolean match(SubjectAttribute subjectAttribute) { - - if (subjectAttribute instanceof DistinguishedName) { - return true; - } - return false; - } - -} diff --git a/src/main/java/it/grid/storm/authz/sa/model/FQANEveryonePattern.java b/src/main/java/it/grid/storm/authz/sa/model/FQANEveryonePattern.java deleted file mode 100644 index 6c733f08d..000000000 --- a/src/main/java/it/grid/storm/authz/sa/model/FQANEveryonePattern.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * - */ -package it.grid.storm.authz.sa.model; - -import it.grid.storm.authz.sa.AuthzDBReaderException; -import it.grid.storm.griduser.FQAN; -import it.grid.storm.griduser.SubjectAttribute; - -/** - * @author zappi - * - */ -public class FQANEveryonePattern extends FQANPattern implements Everyone { - - /* - * Return always true because the pattern is built programmatically, and it is supposed to be - * valid. - * - * @see it.grid.storm.authz.sa.model.SubjectPattern#isValidPattern() - */ - @Override - public boolean isValidPattern() throws AuthzDBReaderException { - - return true; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.authz.sa.model.SubjectPattern#match(it.grid.storm.griduser .FQAN) - */ - @Override - public boolean match(SubjectAttribute sa) { - - boolean result = false; - if (sa instanceof FQAN) { - result = true; - } - return result; - } - - @Override - public String toString() { - - return Everyone.EVERYONE; - } - -} diff --git a/src/main/java/it/grid/storm/authz/sa/model/FileAuthzDB.java b/src/main/java/it/grid/storm/authz/sa/model/FileAuthzDB.java deleted file mode 100644 index 785eb77c2..000000000 --- a/src/main/java/it/grid/storm/authz/sa/model/FileAuthzDB.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.authz.sa.model; - -import it.grid.storm.authz.sa.AuthzDBInterface; -import it.grid.storm.authz.sa.AuthzDBReaderException; -import it.grid.storm.namespace.model.SAAuthzType; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.StringTokenizer; - -import org.apache.commons.configuration.PropertiesConfiguration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class FileAuthzDB implements AuthzDBInterface { - - private static final Logger log = LoggerFactory.getLogger(FileAuthzDB.class); - private final PropertiesConfiguration authzDB; - - private final String acePrefix = "ace"; - private int majorVersion = -1; - private int minorVersion = -1; - private String versionDescription = "Unknown"; - private SAAuthzType authzDBType = SAAuthzType.UNKNOWN; - private List spaceACL = null; - - public FileAuthzDB(PropertiesConfiguration authzDB) throws AuthzDBReaderException { - - this.authzDB = authzDB; - populateHeader(); - spaceACL = populateACL(); - } - - public int getMajorVersion() { - - return this.majorVersion; - } - - public int getMinorVersion() { - - return this.minorVersion; - } - - public String getVersionDescription() { - - return this.versionDescription; - } - - public SAAuthzType getAuthzDBType() { - - return this.authzDBType; - } - - public String getHeader() { - - return "" + getMajorVersion() + "." 
+ getMinorVersion() + " - " + versionDescription + " [" - + authzDBType + "]"; - } - - public List getOrderedListOfACE() { - - return spaceACL; - } - - // *************** PRIVATE METHODS *************** - - private void populateHeader() { - - this.authzDBType = SAAuthzType.getSAType(authzDB.getString("Type")); - String[] version = authzDB.getStringArray("Version"); - if (version != null) { - String versionNr = version[0]; - StringTokenizer versionsNr = new StringTokenizer(versionNr, ".", false); - if (versionsNr.countTokens() > 0) { - this.majorVersion = Integer.parseInt(versionsNr.nextToken()); - this.minorVersion = Integer.parseInt(versionsNr.nextToken()); - } - if (version.length > 1) { - this.versionDescription = version[1]; - } - } - } - - private List populateACL() { - - spaceACL = new ArrayList(); - Iterator scanKeys = authzDB.getKeys(acePrefix); - while (scanKeys.hasNext()) { - String key = scanKeys.next(); - String value = authzDB.getString(key); - log.debug("KEY: {} VALUE: {}", key, value); - /** @todo IMPLEMENT PARSING OF VALUE */ - - } - /** - * @todo Add the default ACL - */ - return spaceACL; - } - -} diff --git a/src/main/java/it/grid/storm/authz/sa/model/SubjectPatternOld.java b/src/main/java/it/grid/storm/authz/sa/model/SubjectPatternOld.java deleted file mode 100644 index 1992ee0cc..000000000 --- a/src/main/java/it/grid/storm/authz/sa/model/SubjectPatternOld.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.authz.sa.model; - -import it.grid.storm.authz.sa.AuthzDBReaderException; -import it.grid.storm.griduser.DNMatchingRule; - -public class SubjectPatternOld { - - private String dnPatternStr = null; - private String fqanPatternStr = null; - private DNMatchingRule dnMR = null; - private EGEEFQANPattern fqanMR = null; - - private static DNMatchingRule DEFAULT_DN_PATTERN = DNMatchingRule.buildMatchAllDNMatchingRule(); - - public SubjectPatternOld(String dnPattern, String fqanPattern) throws AuthzDBReaderException { - - this.dnPatternStr = dnPattern; - this.dnMR = new DNMatchingRule(dnPattern); - this.fqanPatternStr = fqanPattern; - this.fqanMR = new EGEEFQANPattern(fqanPattern); - } - - public SubjectPatternOld(String fqanPattern) { - - this.dnPatternStr = ".*"; - this.dnMR = SubjectPatternOld.DEFAULT_DN_PATTERN; - this.fqanPatternStr = fqanPattern; - } - - public String getDNPatternStr() { - - return this.dnPatternStr; - } - - public String getFQANPatternStr() { - - return this.fqanPatternStr; - } - - public DNMatchingRule getDNPattern() { - - return this.dnMR; - } - - public EGEEFQANPattern getFQANPattern() { - - return this.fqanMR; - } - -} diff --git a/src/main/java/it/grid/storm/authz/util/PathAuthzConfigurationWatcher.java b/src/main/java/it/grid/storm/authz/util/PathAuthzConfigurationWatcher.java deleted file mode 100644 index be5849ed3..000000000 --- a/src/main/java/it/grid/storm/authz/util/PathAuthzConfigurationWatcher.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * - */ -package it.grid.storm.authz.util; - -import java.io.File; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * @author ritz - */ -public class PathAuthzConfigurationWatcher extends ConfigurationWatcher { - - private static final Logger log = LoggerFactory.getLogger(PathAuthzConfigurationWatcher.class); - - /** - * @param file - */ - public PathAuthzConfigurationWatcher(File file) { - - super(file); - log.debug("Watcher manages the configuration file: {}", file); - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.authz.util.ConfigurationWatcher#onChange() - */ - @Override - protected void onChange() { - - log.info("Path Authorization DB is changed! 
Going to reload it"); - // Force the reload of the configuration file - - } - -} diff --git a/src/main/java/it/grid/storm/balancer/ftp/GridFtpConnectionStatus.java b/src/main/java/it/grid/storm/balancer/ftp/GridFtpConnectionStatus.java index 34f844e33..e30371f74 100644 --- a/src/main/java/it/grid/storm/balancer/ftp/GridFtpConnectionStatus.java +++ b/src/main/java/it/grid/storm/balancer/ftp/GridFtpConnectionStatus.java @@ -36,7 +36,7 @@ public boolean isGridFtpConnectionValid() throws Exception { public void setMessageParsingResponse(boolean response) { - this.messageParsingResponse = new Boolean(response); + this.messageParsingResponse = Boolean.valueOf(response); } diff --git a/src/main/java/it/grid/storm/catalogs/AnonymousFileTransferData.java b/src/main/java/it/grid/storm/catalogs/AnonymousFileTransferData.java deleted file mode 100644 index 70da88a0b..000000000 --- a/src/main/java/it/grid/storm/catalogs/AnonymousFileTransferData.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TTURL; - -/** - * @author Michele Dibenedetto - * - */ - -public abstract class AnonymousFileTransferData extends - SurlMultyOperationRequestData implements FileTransferData { - - protected TURLPrefix transferProtocols; - protected TTURL transferURL; - - public AnonymousFileTransferData(TSURL toSURL, TURLPrefix transferProtocols, - TReturnStatus status, TTURL transferURL) - throws InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(toSURL, status); - if (transferProtocols == null || transferURL == null) { - throw new InvalidFileTransferDataAttributesException(toSURL, - transferProtocols, status, transferURL); - } - this.transferProtocols = transferProtocols; - this.transferURL = transferURL; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.FileTransferData#getTransferProtocols() - */ - @Override - public final TURLPrefix getTransferProtocols() { - - return transferProtocols; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.FileTransferData#getTransferURL() - */ - @Override - public final TTURL getTransferURL() { - - return transferURL; - } - - /* - * (non-Javadoc) - * - * @see - * it.grid.storm.catalogs.FileTransferData#setTransferURL(it.grid.storm.srm - * .types.TTURL) - */ - @Override - public final void setTransferURL(final TTURL turl) { - - if (turl != null) { - transferURL = turl; - } - } -} diff --git a/src/main/java/it/grid/storm/catalogs/AnonymousPtGData.java b/src/main/java/it/grid/storm/catalogs/AnonymousPtGData.java deleted file mode 100644 index 615c590fe..000000000 --- a/src/main/java/it/grid/storm/catalogs/AnonymousPtGData.java +++ /dev/null @@ -1,236 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.srm.types.TTURL; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a PrepareToGetChunkData, that is part of a multifile - * PrepareToGet srm request. It contains data about: the requestToken, the - * fromSURL, the requested lifeTime of pinning, the TDirOption which tells - * whether the requested SURL is a directory and if it must be recursed at all - * levels, as well as the desired number of levels to recurse, the desired - * transferProtocols in order of preference, the fileSize, and the transferURL - * for the supplied SURL. - * - * @author EGRID - ICTP Trieste - * @date March 21st, 2005 - * @version 3.0 - */ -public class AnonymousPtGData extends AnonymousFileTransferData implements - PtGData { - - private static final Logger log = LoggerFactory - .getLogger(AnonymousPtGData.class); - - /** requested lifetime of TURL: it is the pin time! 
*/ - protected TLifeTimeInSeconds pinLifeTime; - /** specifies if the request regards a directory and related info */ - protected TDirOption dirOption; - /** size of file */ - protected TSizeInBytes fileSize; - - /** - * @param requestToken - * @param fromSURL - * @param lifeTime - * @param dirOption - * @param desiredProtocols - * @param fileSize - * @param status - * @param transferURL - * @throws InvalidPtGDataAttributesException - */ - public AnonymousPtGData(TSURL SURL, TLifeTimeInSeconds lifeTime, - TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize, - TReturnStatus status, TTURL transferURL) - throws InvalidPtGDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(SURL, desiredProtocols, status, transferURL); - if (lifeTime == null || dirOption == null || fileSize == null) { - log.debug("Invalid arguments: lifeTime={}, dirOption={}, fileSize={}", - lifeTime, dirOption, fileSize); - throw new InvalidPtGDataAttributesException(SURL, lifeTime, dirOption, - desiredProtocols, fileSize, status, transferURL); - - } - this.pinLifeTime = lifeTime; - this.dirOption = dirOption; - this.fileSize = fileSize; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtGData#getPinLifeTime() - */ - @Override - public TLifeTimeInSeconds getPinLifeTime() { - - return pinLifeTime; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtGData#getDirOption() - */ - @Override - public TDirOption getDirOption() { - - return dirOption; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtGData#getFileSize() - */ - @Override - public TSizeInBytes getFileSize() { - - return fileSize; - } - - /* - * (non-Javadoc) - * - * @see - * it.grid.storm.catalogs.PtGData#setFileSize(it.grid.storm.srm.types.TSizeInBytes - * ) - */ - @Override - public void setFileSize(TSizeInBytes size) { - - if (size != null) { - fileSize = size; - } - } - - /* - * (non-Javadoc) - * - * 
@see - * it.grid.storm.catalogs.PtGData#changeStatusSRM_FILE_PINNED(java.lang.String - * ) - */ - @Override - public void changeStatusSRM_FILE_PINNED(String explanation) { - - setStatus(TStatusCode.SRM_FILE_PINNED, explanation); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("PtGChunkData [pinLifeTime="); - builder.append(pinLifeTime); - builder.append(", dirOption="); - builder.append(dirOption); - builder.append(", fileSize="); - builder.append(fileSize); - builder.append(", transferProtocols="); - builder.append(transferProtocols); - builder.append(", SURL="); - builder.append(SURL); - builder.append(", status="); - builder.append(status); - builder.append(", transferURL="); - builder.append(transferURL); - builder.append("]"); - return builder.toString(); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((dirOption == null) ? 0 : dirOption.hashCode()); - result = prime * result + ((fileSize == null) ? 0 : fileSize.hashCode()); - result = prime * result - + ((pinLifeTime == null) ? 
0 : pinLifeTime.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - AnonymousPtGData other = (AnonymousPtGData) obj; - if (dirOption == null) { - if (other.dirOption != null) { - return false; - } - } else if (!dirOption.equals(other.dirOption)) { - return false; - } - if (fileSize == null) { - if (other.fileSize != null) { - return false; - } - } else if (!fileSize.equals(other.fileSize)) { - return false; - } - if (pinLifeTime == null) { - if (other.pinLifeTime != null) { - return false; - } - } else if (!pinLifeTime.equals(other.pinLifeTime)) { - return false; - } - return true; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/AnonymousPtPData.java b/src/main/java/it/grid/storm/catalogs/AnonymousPtPData.java deleted file mode 100644 index dca2d5af5..000000000 --- a/src/main/java/it/grid/storm/catalogs/AnonymousPtPData.java +++ /dev/null @@ -1,240 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.srm.types.TTURL; - -/** - * @author Michele Dibenedetto - * - */ -public class AnonymousPtPData extends AnonymousFileTransferData implements - PtPData { - - private static final Logger log = LoggerFactory.getLogger(AnonymousPtPData.class); - - protected TSpaceToken spaceToken; - protected TLifeTimeInSeconds pinLifetime; - protected TLifeTimeInSeconds fileLifetime; - protected TFileStorageType fileStorageType; - protected TOverwriteMode overwriteOption; - protected TSizeInBytes expectedFileSize; - - public AnonymousPtPData(TSURL toSURL, TLifeTimeInSeconds pinLifetime, - TLifeTimeInSeconds fileLifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TSizeInBytes expectedFileSize, - TURLPrefix transferProtocols, TOverwriteMode overwriteOption, - TReturnStatus status, TTURL transferURL) - throws InvalidPtPDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(toSURL, transferProtocols, status, transferURL); - if (pinLifetime == null || fileLifetime == null || spaceToken == null - || fileStorageType == null || expectedFileSize == null - || overwriteOption == null) { - log.debug("Invalid arguments: pinLifetime={}, fileLifetime={}, " - + "spaceToken={}, fileStorageType={}, expectedFileSize={}, " - + "overwriteOption={}", pinLifetime, fileLifetime, spaceToken, - fileStorageType, expectedFileSize, overwriteOption); - throw new 
InvalidPtPDataAttributesException(toSURL, pinLifetime, - fileLifetime, fileStorageType, spaceToken, expectedFileSize, - transferProtocols, overwriteOption, status, transferURL); - } - this.spaceToken = spaceToken; - this.pinLifetime = pinLifetime; - this.fileLifetime = fileLifetime; - this.fileStorageType = fileStorageType; - this.expectedFileSize = expectedFileSize; - this.overwriteOption = overwriteOption; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtPData#getSpaceToken() - */ - @Override - public final TSpaceToken getSpaceToken() { - - return spaceToken; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtPData#pinLifetime() - */ - @Override - public TLifeTimeInSeconds pinLifetime() { - - return pinLifetime; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtPData#fileLifetime() - */ - @Override - public TLifeTimeInSeconds fileLifetime() { - - return fileLifetime; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtPData#fileStorageType() - */ - @Override - public TFileStorageType fileStorageType() { - - return fileStorageType; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtPData#expectedFileSize() - */ - @Override - public TSizeInBytes expectedFileSize() { - - return expectedFileSize; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtPData#overwriteOption() - */ - @Override - public TOverwriteMode overwriteOption() { - - return overwriteOption; - } - - /** - * Method that sets the status of this request to SRM_SPACE_AVAILABLE; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. 
- */ - @Override - public void changeStatusSRM_SPACE_AVAILABLE(String explanation) { - - setStatus(TStatusCode.SRM_SPACE_AVAILABLE, explanation); - } - - /** - * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - @Override - public void changeStatusSRM_DUPLICATION_ERROR(String explanation) { - - setStatus(TStatusCode.SRM_DUPLICATION_ERROR, explanation); - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("PtPChunkData\n"); - sb.append("toSURL="); - sb.append(SURL); - sb.append("; "); - sb.append("pinLifetime="); - sb.append(pinLifetime); - sb.append("; "); - sb.append("fileLifetime="); - sb.append(fileLifetime); - sb.append("; "); - sb.append("fileStorageType="); - sb.append(fileStorageType); - sb.append("; "); - sb.append("spaceToken="); - sb.append(spaceToken); - sb.append("; "); - sb.append("expectedFileSize="); - sb.append(expectedFileSize); - sb.append("; "); - sb.append("transferProtocols="); - sb.append(transferProtocols); - sb.append("; "); - sb.append("overwriteOption="); - sb.append(overwriteOption); - sb.append("; "); - sb.append("status="); - sb.append(status); - sb.append("; "); - sb.append("transferURL="); - sb.append(transferURL); - sb.append("; "); - return sb.toString(); - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + SURL.hashCode(); - hash = 37 * hash + pinLifetime.hashCode(); - hash = 37 * hash + fileLifetime.hashCode(); - hash = 37 * hash + fileStorageType.hashCode(); - hash = 37 * hash + spaceToken.hashCode(); - hash = 37 * hash + expectedFileSize.hashCode(); - hash = 37 * hash + transferProtocols.hashCode(); - hash = 37 * hash + overwriteOption.hashCode(); - hash = 37 * hash + status.hashCode(); - hash = 37 * hash + transferURL.hashCode(); - return hash; - } - - @Override - 
public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof AnonymousPtPData)) { - return false; - } - AnonymousPtPData cd = (AnonymousPtPData) o; - return SURL.equals(cd.SURL) && pinLifetime.equals(cd.pinLifetime) - && fileLifetime.equals(cd.fileLifetime) - && fileStorageType.equals(cd.fileStorageType) - && spaceToken.equals(cd.spaceToken) - && expectedFileSize.equals(cd.expectedFileSize) - && transferProtocols.equals(cd.transferProtocols) - && overwriteOption.equals(cd.overwriteOption) && status.equals(cd.status) - && transferURL.equals(cd.transferURL); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java index 31b6a1407..78e8fda9c 100644 --- a/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java @@ -17,14 +17,31 @@ package it.grid.storm.catalogs; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.common.types.SizeUnit; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.common.types.TimeUnit; import it.grid.storm.config.Configuration; -import it.grid.storm.griduser.GridUserInterface; -// import it.grid.storm.namespace.SurlStatusStore; +import it.grid.storm.persistence.converter.PinLifetimeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.persistence.dao.BoLChunkDAO; +import it.grid.storm.persistence.exceptions.InvalidReducedBoLChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.impl.mysql.BoLChunkDAOMySql; +import it.grid.storm.persistence.model.BoLChunkDataTO; 
+import it.grid.storm.persistence.model.BoLPersistentChunkData; +import it.grid.storm.persistence.model.ReducedBoLChunkData; +import it.grid.storm.persistence.model.ReducedBoLChunkDataTO; import it.grid.storm.srm.types.InvalidTDirOptionAttributesException; -import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.TDirOption; @@ -36,20 +53,10 @@ import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.srm.types.TTURL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * Class that represents StoRMs BoLChunkCatalog: it collects BoLChunkData and - * provides methods for looking up a BoLChunkData based on TRequestToken, as - * well as for adding a new entry and removing an existing one. + * Class that represents StoRMs BoLChunkCatalog: it collects BoLChunkData and provides methods for + * looking up a BoLChunkData based on TRequestToken, as well as for adding a new entry and removing + * an existing one. * * @author CNAF * @date Aug 2009 @@ -57,762 +64,306 @@ */ public class BoLChunkCatalog { - private static final Logger log = LoggerFactory - .getLogger(BoLChunkCatalog.class); - - /* only instance of BoLChunkCatalog present in StoRM! */ - private static final BoLChunkCatalog cat = new BoLChunkCatalog(); - private final BoLChunkDAO dao = BoLChunkDAO.getInstance(); - - /* - * Timer object in charge of transiting expired requests from SRM_FILE_PINNED - * to SRM_RELEASED! - */ - private final Timer transiter = new Timer(); - /* Delay time before starting cleaning thread! */ - private final long delay = Configuration.getInstance() - .getTransitInitialDelay() * 1000; - /* Period of execution of cleaning! 
*/ - private final long period = Configuration.getInstance() - .getTransitTimeInterval() * 1000; - - /** - * Private constructor that starts the internal timer needed to periodically - * check and transit requests whose pinLifetime has expired and are in - * SRM_FILE_PINNED, to SRM_RELEASED. - */ - private BoLChunkCatalog() { - - TimerTask transitTask = new TimerTask() { - - @Override - public void run() { - - transitExpiredSRM_SUCCESS(); - } - }; - transiter.scheduleAtFixedRate(transitTask, delay, period); - } - - /** - * Method that returns the only instance of BoLChunkCatalog available. - */ - public static BoLChunkCatalog getInstance() { - - return cat; - } - - /** - * Method that returns a Collection of BoLChunkData Objects matching the - * supplied TRequestToken. - * - * If any of the data associated to the TRequestToken is not well formed and - * so does not allow a BoLChunkData Object to be created, then that part of - * the request is dropped and gets logged, and the processing continues with - * the next part. All valid chunks get returned: the others get dropped. - * - * If there are no chunks to process then an empty Collection is returned, and - * a message gets logged. - */ - synchronized public Collection lookup(TRequestToken rt) { - - Collection chunkCollection = dao.find(rt); - log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkCollection); - List list = new ArrayList(); - - if (chunkCollection.isEmpty()) { - log.warn("BoL CHUNK CATALOG! No chunks found in persistence for specified " - + "request: {}", rt); - return list; - } - - BoLPersistentChunkData chunk; - for (BoLChunkDataTO chunkTO : chunkCollection) { - chunk = makeOne(chunkTO, rt); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(completeTO(chunkTO, chunk)); - } catch (InvalidReducedBoLChunkDataAttributesException e) { - log.warn("BoL CHUNK CATALOG! 
unable to add missing informations on DB " - + "to the request: {}", e.getMessage()); - } - } - log.debug("BoL CHUNK CATALOG: returning " + list); - return list; - } - - /** - * Generates a BoLChunkData from the received BoLChunkDataTO - * - * @param auxTO - * @param rt - * @return - */ - private BoLPersistentChunkData makeOne(BoLChunkDataTO auxTO, TRequestToken rt) { - - StringBuilder errorSb = new StringBuilder(); - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(auxTO.getFromSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (auxTO.normalizedStFN() != null) { - fromSURL.setNormalizedStFN(auxTO.normalizedStFN()); - } - if (auxTO.sulrUniqueID() != null) { - fromSURL.setUniqueID(auxTO.sulrUniqueID().intValue()); - } - // lifeTime - TLifeTimeInSeconds lifeTime = null; - try { - long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM( - auxTO.getLifeTime()); - // Check for max value allowed - long max = Configuration.getInstance().getPinLifetimeMaximum(); - if (pinLifeTime > max) { - log.warn("PinLifeTime is greater than the max value allowed. " - + "Drop the value to the max = {} seconds", max); - pinLifeTime = max; - } - lifeTime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // dirOption - TDirOption dirOption = null; - try { - dirOption = new TDirOption(auxTO.getDirOption(), - auxTO.getAllLevelRecursive(), auxTO.getNumLevel()); - } catch (InvalidTDirOptionAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // transferProtocols - TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO - .getProtocolList()); - if (transferProtocols.size() == 0) { - errorSb.append("\nEmpty list of TransferProtocols or" - + " could not translate TransferProtocols!"); - /* fail construction of BoLChunkData! 
*/ - transferProtocols = null; - } - // fileSize - TSizeInBytes fileSize = null; - try { - fileSize = TSizeInBytes.make(auxTO.getFileSize(), SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - auxTO.getStatus()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + auxTO.getStatus()); - } else { - status = new TReturnStatus(code, auxTO.getErrString()); - } - // transferURL - /* - * whatever is read is just meaningless because BoL will fill it in!!! So - * create an Empty TTURL by default! Vital to avoid problems with unknown - * DPM NULL/EMPTY logic policy! - */ - TTURL transferURL = TTURL.makeEmpty(); - // make BoLChunkData - BoLPersistentChunkData aux = null; - try { - aux = new BoLPersistentChunkData(rt, fromSURL, lifeTime, dirOption, - transferProtocols, fileSize, status, transferURL, - auxTO.getDeferredStartTime()); - aux.setPrimaryKey(auxTO.getPrimaryKey()); - } catch (InvalidSurlRequestDataAttributesException e) { - dao.signalMalformedBoLChunk(auxTO); - log.warn("BoL CHUNK CATALOG! Retrieved malformed BoL " - + "chunk data from persistence. Dropping chunk from request {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... 
- return aux; - } - - /** - * - * Adds to the received BoLChunkDataTO the normalized StFN and the SURL unique - * ID taken from the BoLChunkData - * - * @param chunkTO - * @param chunk - */ - private void completeTO(ReducedBoLChunkDataTO chunkTO, - final ReducedBoLChunkData chunk) { - - chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN()); - chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId())); - } - - /** - * - * Creates a ReducedBoLChunkDataTO from the received BoLChunkDataTO and - * completes it with the normalized StFN and the SURL unique ID taken from the - * PtGChunkData - * - * @param chunkTO - * @param chunk - * @return - * @throws InvalidReducedBoLChunkDataAttributesException - */ - private ReducedBoLChunkDataTO completeTO(BoLChunkDataTO chunkTO, - final BoLPersistentChunkData chunk) - throws InvalidReducedBoLChunkDataAttributesException { - - ReducedBoLChunkDataTO reducedChunkTO = this.reduce(chunkTO); - this.completeTO(reducedChunkTO, this.reduce(chunk)); - return reducedChunkTO; - } - - /** - * Creates a ReducedBoLChunkData from the data contained in the received - * BoLChunkData - * - * @param chunk - * @return - * @throws InvalidReducedBoLChunkDataAttributesException - */ - private ReducedBoLChunkData reduce(BoLPersistentChunkData chunk) - throws InvalidReducedBoLChunkDataAttributesException { - - ReducedBoLChunkData reducedChunk = new ReducedBoLChunkData(chunk.getSURL(), - chunk.getStatus()); - reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); - return reducedChunk; - } - - /** - * Creates a ReducedBoLChunkDataTO from the data contained in the received - * BoLChunkDataTO - * - * @param chunkTO - * @return - */ - private ReducedBoLChunkDataTO reduce(BoLChunkDataTO chunkTO) { - - ReducedBoLChunkDataTO reducedChunkTO = new ReducedBoLChunkDataTO(); - reducedChunkTO.setPrimaryKey(chunkTO.getPrimaryKey()); - reducedChunkTO.setFromSURL(chunkTO.getFromSURL()); - reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); - 
reducedChunkTO.setSurlUniqueID(chunkTO.sulrUniqueID()); - reducedChunkTO.setStatus(chunkTO.getStatus()); - reducedChunkTO.setErrString(chunkTO.getErrString()); - return reducedChunkTO; - } - - /** - * Checks if the received BoLChunkDataTO contains the fields not set by the - * front end but required - * - * @param chunkTO - * @return - */ - private boolean isComplete(BoLChunkDataTO chunkTO) { - - return (chunkTO.normalizedStFN() != null) - && (chunkTO.sulrUniqueID() != null); - } - - /** - * Checks if the received ReducedBoLChunkDataTO contains the fields not set by - * the front end but required - * - * @param reducedChunkTO - * @return - */ - // TODO MICHELE USER_SURL new method - private boolean isComplete(ReducedBoLChunkDataTO reducedChunkTO) { - - return (reducedChunkTO.normalizedStFN() != null) - && (reducedChunkTO.surlUniqueID() != null); - } - - /** - * Method used to update into Persistence a retrieved BoLChunkData. In case - * any error occurs, the operation does not proceed but no Exception is - * thrown. Error messages get logged. - * - * Only fileSize, StatusCode, errString and transferURL are updated. Likewise - * for the request pinLifetime. - */ - synchronized public void update(BoLPersistentChunkData cd) { - - BoLChunkDataTO to = new BoLChunkDataTO(); - /* Primary key needed by DAO Object */ - to.setPrimaryKey(cd.getPrimaryKey()); - to.setFileSize(cd.getFileSize().value()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - cd.getStatus().getStatusCode())); - to.setErrString(cd.getStatus().getExplanation()); - to.setLifeTime(PinLifetimeConverter.getInstance().toDB( - cd.getLifeTime().value())); - // TODO MICHELE USER_SURL fill new fields - to.setNormalizedStFN(cd.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(cd.getSURL().uniqueId())); - - dao.update(to); - // TODO MICHELE SURL STORE - // SurlStatusStore.getInstance().storeSurlStatus(cd.getSURL(), - // cd.getStatus().getStatusCode()); - } - - /** - * Refresh method. 
TODO THIS IS A WORK IN PROGRESS!!!! This method have to - * synch the ChunkData information with the database status. - * - * @param auxTO - * @param BoLPersistentChunkData - * inputChunk - * @return BoLChunkData outputChunk - */ - synchronized public BoLPersistentChunkData refreshStatus( - BoLPersistentChunkData inputChunk) { - - /* Currently not used */ - // Call the dao refresh method to synch with the db status - BoLChunkDataTO auxTO = dao.refresh(inputChunk.getPrimaryKey()); - - log.debug("BoL CHUNK CATALOG: retrieved data {}", auxTO); - if (auxTO == null) { - log.warn("BoL CHUNK CATALOG! Empty TO found in persistence for specified " - + "request: {}", inputChunk.getPrimaryKey()); - return inputChunk; - } - - /* - * In this first version the only field updated is the Status. Once - * updated, the new status is rewritten into the input ChunkData - */ - - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.getStatus()); - if (code != TStatusCode.EMPTY) { - status = new TReturnStatus(code, auxTO.getErrString()); - } - inputChunk.setStatus(status); - return inputChunk; - } - - /** - * Method that returns a Collection of ReducedBoLChunkData Objects associated - * to the supplied TRequestToken. - * - * If any of the data retrieved for a given chunk is not well formed and so - * does not allow a ReducedBoLChunkData Object to be created, then that chunk - * is dropped and gets logged, while processing continues with the next one. - * All valid chunks get returned: the others get dropped. - * - * If there are no chunks associated to the given TRequestToken, then an empty - * Collection is returned and a messagge gets logged. 
- */ - synchronized public Collection lookupReducedBoLChunkData( - TRequestToken rt) { - - Collection reducedChunkDataTOs = dao.findReduced(rt - .getValue()); - log.debug("BoL CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs); - ArrayList list = new ArrayList(); - if (reducedChunkDataTOs.isEmpty()) { - log.debug("BoL CHUNK CATALOG! No chunks found in persistence for {}", rt); - } else { - ReducedBoLChunkData reducedChunkData = null; - for (ReducedBoLChunkDataTO reducedChunkDataTO : reducedChunkDataTOs) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!this.isComplete(reducedChunkDataTO)) { - completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("BoL CHUNK CATALOG: returning {}", list); - } - return list; - } - - public Collection lookupReducedBoLChunkData( - TRequestToken requestToken, Collection surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.findReduced( - requestToken, surlsUniqueIDs, surlsArray); - return buildReducedChunkDataList(chunkDataTOCollection); - } - - public Collection lookupBoLChunkData(TSURL surl, - GridUserInterface user) { - - return lookupBoLChunkData(Arrays.asList(new TSURL[] { surl }), user); - } - - public Collection lookupBoLChunkData(TSURL surl) { - - return lookupBoLChunkData(Arrays.asList(new TSURL[] { surl })); - } - - private Collection lookupBoLChunkData( - List surls, GridUserInterface user) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - 
Collection chunkDataTOCollection = dao.find(surlsUniqueIDs, - surlsArray, user.getDn()); - log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildChunkDataList(chunkDataTOCollection); - } - - public Collection lookupBoLChunkData(List surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.find(surlsUniqueIDs, - surlsArray); - log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildChunkDataList(chunkDataTOCollection); - } - - private Collection buildChunkDataList( - Collection chunkDataTOCollection) { - - List list = new ArrayList(); - BoLPersistentChunkData chunk; - for (BoLChunkDataTO chunkTO : chunkDataTOCollection) { - chunk = makeOne(chunkTO); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(this.completeTO(chunkTO, chunk)); - } catch (InvalidReducedBoLChunkDataAttributesException e) { - log.warn("BoL CHUNK CATALOG! unable to add missing informations " - + "on DB to the request: {}", e.getMessage()); - } - } - log.debug("BoL CHUNK CATALOG: returning {}", list); - return list; - } - - private BoLPersistentChunkData makeOne(BoLChunkDataTO chunkTO) { - - try { - return makeOne(chunkTO, new TRequestToken(chunkTO.getRequestToken(), - chunkTO.getTimeStamp())); - } catch (InvalidTRequestTokenAttributesException e) { - throw new IllegalStateException( - "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " - + e); - } - } - - /** - * Method that returns a Collection of ReducedBoLChunkData Objects matching - * the supplied GridUser and Collection of TSURLs. 
- * - * If any of the data retrieved for a given chunk is not well formed and so - * does not allow a ReducedBoLChunkData Object to be created, then that chunk - * is dropped and gets logged, while processing continues with the next one. - * All valid chunks get returned: the others get dropped. - * - * If there are no chunks associated to the given GridUser and Collection of - * TSURLs, then an empty Collection is returned and a message gets logged. - */ - synchronized public Collection lookupReducedBoLChunkData( - GridUserInterface gu, Collection tsurlCollection) { - - int[] surlsUniqueIDs = new int[tsurlCollection.size()]; - String[] surls = new String[tsurlCollection.size()]; - int index = 0; - for (TSURL tsurl : tsurlCollection) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.findReduced( - gu.getDn(), surlsUniqueIDs, surls); - log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildReducedChunkDataList(chunkDataTOCollection); - } - - private Collection buildReducedChunkDataList( - Collection chunkDataTOCollection) { - - ArrayList list = new ArrayList(); - ReducedBoLChunkData reducedChunkData; - for (ReducedBoLChunkDataTO reducedChunkDataTO : chunkDataTOCollection) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!this.isComplete(reducedChunkDataTO)) { - this.completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("BoL CHUNK CATALOG: returning {}", list); - return list; - } - - /** - * @param auxTO - * @return - */ - private ReducedBoLChunkData makeOneReduced( - ReducedBoLChunkDataTO reducedChunkDataTO) { - - StringBuilder errorSb = new StringBuilder(); - // fromSURL - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.fromSURL()); - } catch 
(InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (reducedChunkDataTO.normalizedStFN() != null) { - fromSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN()); - } - if (reducedChunkDataTO.surlUniqueID() != null) { - fromSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue()); - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - reducedChunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + reducedChunkDataTO.status()); - } else { - status = new TReturnStatus(code, reducedChunkDataTO.errString()); - } - // make ReducedBoLChunkData - ReducedBoLChunkData aux = null; - try { - aux = new ReducedBoLChunkData(fromSURL, status); - aux.setPrimaryKey(reducedChunkDataTO.primaryKey()); - } catch (InvalidReducedBoLChunkDataAttributesException e) { - log.warn("BoL CHUNK CATALOG! Retrieved malformed " - + "Reduced BoL chunk data from persistence: dropping reduced chunk..."); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... - return aux; - } - - /** - * Method used to add into Persistence a new entry. The supplied BoLChunkData - * gets the primary key changed to the value assigned in Persistence. - * - * This method is intended to be used by a recursive BoL request: the parent - * request supplies a directory which must be expanded, so all new children - * requests resulting from the files in the directory are added into - * persistence. - * - * So this method does _not_ add a new SRM prepare_to_get request into the DB! - * - * The only children data written into the DB are: sourceSURL, TDirOption, - * statusCode and explanation. - * - * In case of any error the operation does not proceed, but no Exception is - * thrown! Proper messages get logged by underlaying DAO. 
- */ - synchronized public void addChild(BoLPersistentChunkData chunkData) { - - BoLChunkDataTO to = new BoLChunkDataTO(); - // needed for now to find ID of request! Must be changed soon! - to.setRequestToken(chunkData.getRequestToken().toString()); - to.setFromSURL(chunkData.getSURL().toString()); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - - to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); - to.setDirOption(chunkData.getDirOption().isDirectory()); - to.setNumLevel(chunkData.getDirOption().getNumLevel()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setDeferredStartTime(chunkData.getDeferredStartTime()); - - /* add the entry and update the Primary Key field */ - dao.addChild(to); - chunkData.setPrimaryKey(to.getPrimaryKey()); - } - - /** - * Method used to add into Persistence a new entry. The supplied BoLChunkData - * gets the primary key changed to the value assigned in the Persistence. The - * method requires the GridUser to whom associate the added request. - * - * This method is intended to be used by an srmCopy request in push mode which - * implies a local srmBoL. The only fields from BoLChunkData that are - * considered are: the requestToken, the sourceSURL, the pinLifetime, the - * dirOption, the protocolList, the status and error string. - * - * So this method _adds_ a new SRM prepare_to_get request into the DB! - * - * In case of any error the operation does not proceed, but no Exception is - * thrown! The underlaying DAO logs proper error messages. 
- */ - synchronized public void add(BoLPersistentChunkData chunkData, - GridUserInterface gu) { - - /* Currently NOT used */ - BoLChunkDataTO to = new BoLChunkDataTO(); - to.setRequestToken(chunkData.getRequestToken().toString()); - to.setFromSURL(chunkData.getSURL().toString()); - // TODO MICHELE USER_SURL fill new fields - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - - to.setLifeTime(new Long(chunkData.getLifeTime().value()).intValue()); - to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); - to.setDirOption(chunkData.getDirOption().isDirectory()); - to.setNumLevel(chunkData.getDirOption().getNumLevel()); - to.setProtocolList(TransferProtocolListConverter.toDB(chunkData - .getTransferProtocols())); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setDeferredStartTime(chunkData.getDeferredStartTime()); - - /* add the entry and update the Primary Key field! */ - dao.addNew(to, gu.getDn()); - chunkData.setPrimaryKey(to.getPrimaryKey()); - } - - /** - * Method used to establish if in Persistence there is a BoLChunkData working - * on the supplied SURL, and whose state is SRM_FILE_PINNED, in which case - * true is returned. In case none are found or there is any problem, false is - * returned. This method is intended to be used by srmMv. - */ - synchronized public boolean isSRM_FILE_PINNED(TSURL surl) { - - return (dao.numberInSRM_SUCCESS(surl.uniqueId()) > 0); - } - - /** - * Method used to transit the specified Collection of ReducedBoLChunkData from - * SRM_FILE_PINNED to SRM_RELEASED. Chunks in any other starting state are not - * transited. In case of any error nothing is done, but proper error messages - * get logged by the underlaying DAO. 
- */ - synchronized public void transitSRM_SUCCESStoSRM_RELEASED( - Collection chunks, TRequestToken token) { - - if (chunks == null || chunks.isEmpty()) { - return; - } - - long[] primaryKeys = new long[chunks.size()]; - int index = 0; - for (ReducedBoLChunkData chunkData : chunks) { - if (chunkData != null) { - primaryKeys[index] = chunkData.primaryKey(); - index++; - } - } - dao.transitSRM_SUCCESStoSRM_RELEASED(primaryKeys, token); - } - - /** - * This method is intended to be used by srmRm to transit all BoL chunks on - * the given SURL which are in the SRM_FILE_PINNED state, to SRM_ABORTED. The - * supplied String will be used as explanation in those chunks return status. - * The global status of the request is _not_ changed. - * - * The TURL of those requests will automatically be set to empty. Notice that - * both removeAllJit(SURL) and removeVolatile(SURL) are automatically invoked - * on PinnedFilesCatalog, to remove any entry and corresponding physical ACLs. - * - * Beware, that the chunks may be part of requests that have finished, or that - * still have not finished because other chunks are being processed. - */ - synchronized public void transitSRM_SUCCESStoSRM_ABORTED(TSURL surl, - String explanation) { - - /* Currently NOT used */ - if (explanation == null) { - explanation = ""; - } - dao.transitSRM_SUCCESStoSRM_ABORTED(surl.uniqueId(), surl.toString(), - explanation); - } - - /** - * Method used to force transition to SRM_RELEASED from SRM_FILE_PINNED, of - * all BoL Requests whose pinLifetime has expired and the state still has not - * been changed (a user forgot to run srmReleaseFiles)! 
- */ - synchronized public void transitExpiredSRM_SUCCESS() { - - dao.transitExpiredSRM_SUCCESS(); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, - newStatusCode, explanation); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - List surlList, TStatusCode expectedStatusCode, - TStatusCode newStatusCode) { - - int[] surlsUniqueIDs = new int[surlList.size()]; - String[] surls = new String[surlList.size()]; - int index = 0; - for (TSURL tsurl : surlList) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode); - } + private static final Logger log = LoggerFactory.getLogger(BoLChunkCatalog.class); + + private final BoLChunkDAO dao; + + private static BoLChunkCatalog instance; + + public static synchronized BoLChunkCatalog getInstance() { + if (instance == null) { + instance = new BoLChunkCatalog(); + } + return instance; + } + + /** + * Private constructor that starts the internal timer needed to periodically check and transit + * requests whose pinLifetime has expired and are in SRM_FILE_PINNED, to SRM_RELEASED. + */ + private BoLChunkCatalog() { + + dao = BoLChunkDAOMySql.getInstance(); + } + + /** + * Method that returns a Collection of BoLChunkData Objects matching the supplied TRequestToken. + * + * If any of the data associated to the TRequestToken is not well formed and so does not allow a + * BoLChunkData Object to be created, then that part of the request is dropped and gets logged, + * and the processing continues with the next part. All valid chunks get returned: the others get + * dropped. + * + * If there are no chunks to process then an empty Collection is returned, and a message gets + * logged. 
+ */ + synchronized public Collection lookup(TRequestToken rt) { + + Collection chunkCollection = dao.find(rt); + log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkCollection); + List list = new ArrayList(); + + if (chunkCollection.isEmpty()) { + log.warn("BoL CHUNK CATALOG! No chunks found in persistence for specified request: {}", rt); + return list; + } + + BoLPersistentChunkData chunk; + for (BoLChunkDataTO chunkTO : chunkCollection) { + chunk = makeOne(chunkTO, rt); + if (chunk == null) { + continue; + } + list.add(chunk); + if (isComplete(chunkTO)) { + continue; + } + try { + dao.updateIncomplete(completeTO(chunkTO, chunk)); + } catch (InvalidReducedBoLChunkDataAttributesException e) { + log.warn( + "BoL CHUNK CATALOG! unable to add missing informations on DB " + "to the request: {}", + e.getMessage()); + } + } + log.debug("BoL CHUNK CATALOG: returning " + list); + return list; + } + + /** + * Generates a BoLChunkData from the received BoLChunkDataTO + * + * @param auxTO + * @param rt + * @return + */ + private BoLPersistentChunkData makeOne(BoLChunkDataTO auxTO, TRequestToken rt) { + + StringBuilder errorSb = new StringBuilder(); + TSURL fromSURL = null; + try { + fromSURL = TSURL.makeFromStringValidate(auxTO.getFromSURL()); + } catch (InvalidTSURLAttributesException e) { + errorSb.append(e); + } + if (auxTO.normalizedStFN() != null) { + fromSURL.setNormalizedStFN(auxTO.normalizedStFN()); + } + if (auxTO.sulrUniqueID() != null) { + fromSURL.setUniqueID(auxTO.sulrUniqueID().intValue()); + } + // lifeTime + TLifeTimeInSeconds lifeTime = null; + try { + long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(auxTO.getLifeTime()); + // Check for max value allowed + long max = Configuration.getInstance().getPinLifetimeMaximum(); + if (pinLifeTime > max) { + log.warn("PinLifeTime is greater than the max value allowed. 
" + + "Drop the value to the max = {} seconds", max); + pinLifeTime = max; + } + lifeTime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // dirOption + TDirOption dirOption = null; + try { + dirOption = + new TDirOption(auxTO.getDirOption(), auxTO.getAllLevelRecursive(), auxTO.getNumLevel()); + } catch (InvalidTDirOptionAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // transferProtocols + TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO.getProtocolList()); + if (transferProtocols.size() == 0) { + errorSb + .append("\nEmpty list of TransferProtocols or" + " could not translate TransferProtocols!"); + /* fail construction of BoLChunkData! */ + transferProtocols = null; + } + // fileSize + TSizeInBytes fileSize = null; + try { + fileSize = TSizeInBytes.make(auxTO.getFileSize(), SizeUnit.BYTES); + } catch (InvalidTSizeAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // status + TReturnStatus status = null; + TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.getStatus()); + if (code == TStatusCode.EMPTY) { + errorSb.append("\nRetrieved StatusCode was not recognised: " + auxTO.getStatus()); + } else { + status = new TReturnStatus(code, auxTO.getErrString()); + } + // transferURL + /* + * whatever is read is just meaningless because BoL will fill it in!!! So create an Empty TTURL + * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy! 
+ */ + TTURL transferURL = TTURL.makeEmpty(); + // make BoLChunkData + BoLPersistentChunkData aux = null; + try { + aux = new BoLPersistentChunkData(rt, fromSURL, lifeTime, dirOption, transferProtocols, + fileSize, status, transferURL, auxTO.getDeferredStartTime()); + aux.setPrimaryKey(auxTO.getPrimaryKey()); + } catch (InvalidSurlRequestDataAttributesException e) { + dao.updateStatus(auxTO, SRM_FAILURE, "Request is malformed!"); + log.warn("BoL CHUNK CATALOG! Retrieved malformed BoL " + + "chunk data from persistence. Dropping chunk from request {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } + // end... + return aux; + } + + /** + * + * Adds to the received BoLChunkDataTO the normalized StFN and the SURL unique ID taken from the + * BoLChunkData + * + * @param chunkTO + * @param chunk + */ + private void completeTO(ReducedBoLChunkDataTO chunkTO, final ReducedBoLChunkData chunk) { + + chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN()); + chunkTO.setSurlUniqueID(Integer.valueOf(chunk.fromSURL().uniqueId())); + } + + /** + * + * Creates a ReducedBoLChunkDataTO from the received BoLChunkDataTO and completes it with the + * normalized StFN and the SURL unique ID taken from the PtGChunkData + * + * @param chunkTO + * @param chunk + * @return + * @throws InvalidReducedBoLChunkDataAttributesException + */ + private ReducedBoLChunkDataTO completeTO(BoLChunkDataTO chunkTO, + final BoLPersistentChunkData chunk) throws InvalidReducedBoLChunkDataAttributesException { + + ReducedBoLChunkDataTO reducedChunkTO = this.reduce(chunkTO); + this.completeTO(reducedChunkTO, this.reduce(chunk)); + return reducedChunkTO; + } + + /** + * Creates a ReducedBoLChunkData from the data contained in the received BoLChunkData + * + * @param chunk + * @return + * @throws InvalidReducedBoLChunkDataAttributesException + */ + private ReducedBoLChunkData reduce(BoLPersistentChunkData chunk) + throws InvalidReducedBoLChunkDataAttributesException { + + 
ReducedBoLChunkData reducedChunk = new ReducedBoLChunkData(chunk.getSURL(), chunk.getStatus()); + reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); + return reducedChunk; + } + + /** + * Creates a ReducedBoLChunkDataTO from the data contained in the received BoLChunkDataTO + * + * @param chunkTO + * @return + */ + private ReducedBoLChunkDataTO reduce(BoLChunkDataTO chunkTO) { + + ReducedBoLChunkDataTO reducedChunkTO = new ReducedBoLChunkDataTO(); + reducedChunkTO.setPrimaryKey(chunkTO.getPrimaryKey()); + reducedChunkTO.setFromSURL(chunkTO.getFromSURL()); + reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); + reducedChunkTO.setSurlUniqueID(chunkTO.sulrUniqueID()); + reducedChunkTO.setStatus(chunkTO.getStatus()); + reducedChunkTO.setErrString(chunkTO.getErrString()); + return reducedChunkTO; + } + + /** + * Checks if the received BoLChunkDataTO contains the fields not set by the front end but required + * + * @param chunkTO + * @return + */ + private boolean isComplete(BoLChunkDataTO chunkTO) { + + return (chunkTO.normalizedStFN() != null) && (chunkTO.sulrUniqueID() != null); + } + + /** + * Method used to update into Persistence a retrieved BoLChunkData. In case any error occurs, the + * operation does not proceed but no Exception is thrown. Error messages get logged. + * + * Only fileSize, StatusCode, errString and transferURL are updated. Likewise for the request + * pinLifetime. 
+ */ + synchronized public void update(BoLPersistentChunkData cd) { + + BoLChunkDataTO to = new BoLChunkDataTO(); + /* Primary key needed by DAO Object */ + to.setPrimaryKey(cd.getPrimaryKey()); + to.setFileSize(cd.getFileSize().value()); + to.setStatus(StatusCodeConverter.getInstance().toDB(cd.getStatus().getStatusCode())); + to.setErrString(cd.getStatus().getExplanation()); + to.setLifeTime(PinLifetimeConverter.getInstance().toDB(cd.getLifeTime().value())); + // TODO MICHELE USER_SURL fill new fields + to.setNormalizedStFN(cd.getSURL().normalizedStFN()); + to.setSurlUniqueID(Integer.valueOf(cd.getSURL().uniqueId())); + + dao.update(to); + } + + /** + * Method used to add into Persistence a new entry. The supplied BoLChunkData gets the primary key + * changed to the value assigned in Persistence. + * + * This method is intended to be used by a recursive BoL request: the parent request supplies a + * directory which must be expanded, so all new children requests resulting from the files in the + * directory are added into persistence. + * + * So this method does _not_ add a new SRM prepare_to_get request into the DB! + * + * The only children data written into the DB are: sourceSURL, TDirOption, statusCode and + * explanation. + * + * In case of any error the operation does not proceed, but no Exception is thrown! Proper + * messages get logged by underlaying DAO. + */ + synchronized public void addChild(BoLPersistentChunkData chunkData) { + + BoLChunkDataTO to = new BoLChunkDataTO(); + // needed for now to find ID of request! Must be changed soon! 
+ to.setRequestToken(chunkData.getRequestToken().toString()); + to.setFromSURL(chunkData.getSURL().toString()); + to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); + to.setSurlUniqueID(Integer.valueOf(chunkData.getSURL().uniqueId())); + + to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); + to.setDirOption(chunkData.getDirOption().isDirectory()); + to.setNumLevel(chunkData.getDirOption().getNumLevel()); + to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode())); + to.setErrString(chunkData.getStatus().getExplanation()); + to.setDeferredStartTime(chunkData.getDeferredStartTime()); + + /* add the entry and update the Primary Key field */ + dao.addChild(to); + chunkData.setPrimaryKey(to.getPrimaryKey()); + } + + public void updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation) { + + dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode, explanation); + } } diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java b/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java deleted file mode 100644 index 660340b6c..000000000 --- a/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java +++ /dev/null @@ -1,1701 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.config.Configuration; -import it.grid.storm.ea.StormEA; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.naming.SURL; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.Statement; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map.Entry; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DAO class for BoLChunkCatalog. This DAO is specifically designed to connect - * to a MySQL DB. The raw data found in those tables is pre-treated in order to - * turn it into the Object Model of StoRM. See Method comments for further info. - * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the - * object model. 
- * - * @author CNAF - * @version 1.0 - * @date Aug 2009 - */ -public class BoLChunkDAO { - - private static final Logger log = LoggerFactory.getLogger(BoLChunkDAO.class); - - /** String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /** String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getStormDbURL(); - /** String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /** String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - /** Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - private final static BoLChunkDAO dao = new BoLChunkDAO(); - - /** - * timer thread that will run a taask to alert when reconnecting is necessary! - */ - private Timer clock = null; - /** - * timer task that will update the boolean signaling that a reconnection is - * needed! - */ - private TimerTask clockTask = null; - /** milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance() - .getDBReconnectPeriod() * 1000; - /** initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - /** boolean that tells whether reconnection is needed because of MySQL bug! */ - private boolean reconnect = false; - - private BoLChunkDAO() { - - setUpConnection(); - - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the BoLChunkDAO. 
- */ - public static BoLChunkDAO getInstance() { - - return dao; - } - - /** - * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets - * its primaryKey changed to the one assigned by the DB. The supplied - * BoLChunkData is used to fill in only the DB table where file specific info - * gets recorded: it does _not_ add a new request! So if spurious data is - * supplied, it will just stay there because of a lack of a parent request! - */ - public synchronized void addChild(BoLChunkDataTO to) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: addChild - unable to get a valid connection!"); - return; - } - String str = null; - PreparedStatement id = null; // statement to find out the ID associated to - // the request token - ResultSet rsid = null; // result set containing the ID of the request. - // insertion - try { - - /* WARNING!!!! We are forced to run a query to get the ID of the request, - * which should NOT be so because the corresponding request object should - * have been changed with the extra field! However, it is not possible - * at the moment to perform such change because of strict deadline and - * the change could wreak havoc the code. So we are forced to make this - * query!!! - */ - - // begin transaction - con.setAutoCommit(false); - logWarnings(con.getWarnings()); - - // find ID of request corresponding to given RequestToken - str = "SELECT rq.ID FROM request_queue rq WHERE rq.r_token=?"; - - id = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - id.setString(1, to.getRequestToken()); - logWarnings(id.getWarnings()); - - log.debug("BoL CHUNK DAO: addChild; {}", id.toString()); - rsid = id.executeQuery(); - logWarnings(id.getWarnings()); - - /* ID of request in request_process! */ - int request_id = extractID(rsid); - int id_s = fillBoLTables(to, request_id); - - // end transaction! 
- con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - - // update primary key reading the generated key - to.setPrimaryKey(id_s); - } catch (SQLException e) { - log.error("BoL CHUNK DAO: unable to complete addChild! BoLChunkDataTO: {}; " - + "exception received: {}", to, e.getMessage(), e); - rollback(con); - } catch (Exception e) { - log.error("BoL CHUNK DAO: unable to complete addChild! BoLChunkDataTO: {}; " - + "exception received: {}", to, e.getMessage(), e); - rollback(con); - } finally { - close(rsid); - close(id); - } - } - - /** - * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets - * its primaryKey changed to the one assigned by the DB. The client_dn must - * also be supplied as a String. The supplied BoLChunkData is used to fill in - * all the DB tables where file specific info gets recorded: it _adds_ a new - * request! - */ - public synchronized void addNew(BoLChunkDataTO to, String client_dn) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: addNew - unable to get a valid connection!"); - return; - } - String str = null; - /* Result set containing the ID of the inserted new request */ - ResultSet rs_new = null; - /* Insert new request into process_request */ - PreparedStatement addNew = null; - /* Insert protocols for request. */ - PreparedStatement addProtocols = null; // insert protocols for request. - try { - // begin transaction - con.setAutoCommit(false); - logWarnings(con.getWarnings()); - - // add to request_queue... 
- str = "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp,deferredStartTime) VALUES (?,?,?,?,?,?,?,?,?)"; - addNew = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - logWarnings(con.getWarnings()); - /* request type set to bring online */ - addNew.setString(1, - RequestTypeConverter.getInstance().toDB(TRequestType.BRING_ON_LINE)); - logWarnings(addNew.getWarnings()); - - addNew.setString(2, client_dn); - logWarnings(addNew.getWarnings()); - - addNew.setInt(3, to.getLifeTime()); - logWarnings(addNew.getWarnings()); - - addNew.setInt( - 4, - StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_INPROGRESS)); - logWarnings(addNew.getWarnings()); - - addNew.setString(5, "New BoL Request resulting from srmCopy invocation."); - logWarnings(addNew.getWarnings()); - - addNew.setString(6, to.getRequestToken()); - logWarnings(addNew.getWarnings()); - - addNew.setInt(7, 1); // number of requested files set to 1! - logWarnings(addNew.getWarnings()); - - addNew.setTimestamp(8, new Timestamp(new Date().getTime())); - logWarnings(addNew.getWarnings()); - - addNew.setInt(9, to.getDeferredStartTime()); - logWarnings(addNew.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; {}", addNew.toString()); - addNew.execute(); - logWarnings(addNew.getWarnings()); - - rs_new = addNew.getGeneratedKeys(); - int id_new = extractID(rs_new); - - // add protocols... 
- str = "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) VALUES (?,?)"; - addProtocols = con.prepareStatement(str); - logWarnings(con.getWarnings()); - for (Iterator i = to.getProtocolList().iterator(); i.hasNext();) { - addProtocols.setInt(1, id_new); - logWarnings(addProtocols.getWarnings()); - - addProtocols.setString(2, i.next()); - logWarnings(addProtocols.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; {}", addProtocols.toString()); - addProtocols.execute(); - logWarnings(addProtocols.getWarnings()); - } - - // addChild... - int id_s = fillBoLTables(to, id_new); - - // end transaction! - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - - // update primary key reading the generated key - to.setPrimaryKey(id_s); - } catch (SQLException e) { - log.error("BoL CHUNK DAO: Rolling back! Unable to complete addNew! " - + "BoLChunkDataTO: {}; exception received: {}", to, e.getMessage(), e); - rollback(con); - } catch (Exception e) { - log.error("BoL CHUNK DAO: unable to complete addNew! 
BoLChunkDataTO: {}; " - + "exception received: {}", to, e.getMessage(), e); - rollback(con); - } finally { - close(rs_new); - close(addNew); - close(addProtocols); - } - } - - /** - * To be used inside a transaction - * - * @param to - * @param requestQueueID - * @return - * @throws SQLException - * @throws Exception - */ - private synchronized int fillBoLTables(BoLChunkDataTO to, int requestQueueID) - throws SQLException, Exception { - - String str = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_do = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_b = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_s = null; - /* insert TDirOption for request */ - PreparedStatement addDirOption = null; - /* insert request_Bol for request */ - PreparedStatement addBoL = null; - PreparedStatement addChild = null; - - try { - // first fill in TDirOption - str = "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) VALUES (?,?,?)"; - addDirOption = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - logWarnings(con.getWarnings()); - addDirOption.setBoolean(1, to.getDirOption()); - logWarnings(addDirOption.getWarnings()); - - addDirOption.setBoolean(2, to.getAllLevelRecursive()); - logWarnings(addDirOption.getWarnings()); - - addDirOption.setInt(3, to.getNumLevel()); - logWarnings(addDirOption.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; {}", addDirOption.toString()); - addDirOption.execute(); - logWarnings(addDirOption.getWarnings()); - - rs_do = addDirOption.getGeneratedKeys(); - int id_do = extractID(rs_do); - - // second fill in request_BoL... sourceSURL and TDirOption! 
- str = "INSERT INTO request_BoL (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) VALUES (?,?,?,?,?)"; - addBoL = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - logWarnings(con.getWarnings()); - addBoL.setInt(1, id_do); - logWarnings(addBoL.getWarnings()); - - addBoL.setInt(2, requestQueueID); - logWarnings(addBoL.getWarnings()); - - addBoL.setString(3, to.getFromSURL()); - logWarnings(addBoL.getWarnings()); - - addBoL.setString(4, to.normalizedStFN()); - logWarnings(addBoL.getWarnings()); - - addBoL.setInt(5, to.sulrUniqueID()); - logWarnings(addBoL.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; {}", addBoL.toString()); - addBoL.execute(); - logWarnings(addBoL.getWarnings()); - - rs_b = addBoL.getGeneratedKeys(); - int id_g = extractID(rs_b); - - // third fill in status_BoL... - str = "INSERT INTO status_BoL (request_BoLID,statusCode,explanation) VALUES (?,?,?)"; - addChild = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - logWarnings(con.getWarnings()); - addChild.setInt(1, id_g); - logWarnings(addChild.getWarnings()); - - addChild.setInt(2, to.getStatus()); - logWarnings(addChild.getWarnings()); - - addChild.setString(3, to.getErrString()); - logWarnings(addChild.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; " + addChild.toString()); - addChild.execute(); - logWarnings(addChild.getWarnings()); - - return id_g; - } finally { - close(rs_do); - close(rs_b); - close(rs_s); - close(addDirOption); - close(addBoL); - close(addChild); - } - } - - /** - * Method used to save the changes made to a retrieved BoLChunkDataTO, back - * into the MySQL DB. Only the fileSize, statusCode and explanation, of - * status_BoL table are written to the DB. Likewise for the request - * pinLifetime. In case of any error, an error message gets logged but no - * exception is thrown. 
- */ - public synchronized void update(BoLChunkDataTO to) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: update - unable to get a valid connection!"); - return; - } - PreparedStatement updateFileReq = null; - try { - // ready updateFileReq... - updateFileReq = con - .prepareStatement("UPDATE request_queue rq JOIN (status_BoL sb, request_BoL rb) ON (rq.ID=rb.request_queueID AND sb.request_BoLID=rb.ID)" - + " SET sb.fileSize=?, sb.statusCode=?, sb.explanation=?, rq.pinLifetime=?, rb.normalized_sourceSURL_StFN=?, rb.sourceSURL_uniqueID=?" - + " WHERE rb.ID=?"); - logWarnings(con.getWarnings()); - updateFileReq.setLong(1, to.getFileSize()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(2, to.getStatus()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(3, to.getErrString()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(4, to.getLifeTime()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(5, to.normalizedStFN()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(6, to.sulrUniqueID()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setLong(7, to.getPrimaryKey()); - logWarnings(updateFileReq.getWarnings()); - // execute update - log.trace("BoL CHUNK DAO: update method; {}", updateFileReq.toString()); - updateFileReq.executeUpdate(); - logWarnings(updateFileReq.getWarnings()); - } catch (SQLException e) { - log.error("BoL CHUNK DAO: Unable to complete update! 
{}", e.getMessage(), e); - } finally { - close(updateFileReq); - } - } - - /** - * Updates the request_Bol represented by the received ReducedBoLChunkDataTO - * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID - * - * @param chunkTO - */ - public synchronized void updateIncomplete(ReducedBoLChunkDataTO chunkTO) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: updateIncomplete - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_BoL SET normalized_sourceSURL_StFN=?, " - + "sourceSURL_uniqueID=? WHERE ID=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - stmt.setString(1, chunkTO.normalizedStFN()); - logWarnings(stmt.getWarnings()); - - stmt.setInt(2, chunkTO.surlUniqueID()); - logWarnings(stmt.getWarnings()); - - stmt.setLong(3, chunkTO.primaryKey()); - logWarnings(stmt.getWarnings()); - - log.trace("BoL CHUNK DAO - update incomplete: {}", stmt.toString()); - stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - } catch (SQLException e) { - log.error("BoL CHUNK DAO: Unable to complete update incomplete! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * TODO WARNING! THIS IS A WORK IN PROGRESS!!! Method used to refresh the - * BoLChunkDataTO information from the MySQL DB. In this first version, only - * the statusCode is reloaded from the DB. TODO The next version must contains - * all the information related to the Chunk! In case of any error, an error - * message gets logged but no exception is thrown. 
- */ - public synchronized BoLChunkDataTO refresh(long primary_key) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: refresh - unable to get a valid connection!"); - return null; - } - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - - try { - // get chunks of the request - str = "SELECT statusCode " + "FROM status_BoL " - + "WHERE request_BoLID=?"; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - find.setLong(1, primary_key); - - logWarnings(find.getWarnings()); - log.trace("BoL CHUNK DAO: refresh status method; " + find.toString()); - - rs = find.executeQuery(); - - logWarnings(find.getWarnings()); - BoLChunkDataTO aux = null; - while (rs.next()) { - aux = new BoLChunkDataTO(); - aux.setStatus(rs.getInt("statusCode")); - } - return aux; - } catch (SQLException e) { - log.error("BoL CHUNK DAO: {}", e.getMessage(), e); - return null; - } finally { - close(rs); - close(find); - } - } - - /** - * Method that queries the MySQL DB to find all entries matching the supplied - * TRequestToken. The Collection contains the corresponding BoLChunkDataTO - * objects. An initial simple query establishes the list of protocols - * associated with the request. A second complex query establishes all chunks - * associated with the request, by properly joining request_queue, - * request_BoL, status_BoL and request_DirOption. The considered fields are: - * (1) From status_BoL: the ID field which becomes the TOs primary key, and - * statusCode. (2) From request_BoL: sourceSURL (3) From request_queue: - * pinLifetime (4) From request_DirOption: isSourceADirectory, - * alLevelRecursive, numOfLevels In case of any error, a log gets written and - * an empty collection is returned. No exception is thrown. NOTE! Chunks in - * SRM_ABORTED status are NOT returned! 
- */ - public synchronized Collection find(TRequestToken requestToken) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - str = "SELECT tp.config_ProtocolsID " - + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " - + "WHERE rq.r_token=?"; - - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList protocols = new ArrayList(); - find.setString(1, strToken); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO: find method; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - close(rs); - close(find); - - // get chunks of the request - str = "SELECT sb.statusCode, rq.timeStamp, rq.pinLifetime, rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID " - + "WHERE rq.r_token=? 
AND sb.statusCode<>?"; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - ArrayList list = new ArrayList(); - find.setString(1, strToken); - logWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO: find method; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - BoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new BoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setDeferredStartTime(rs.getInt("rq.deferredStartTime")); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); - chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); - chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); - chunkDataTO.setProtocolList(protocols); - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BOL CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedBoLChunkDataTO associated to the - * given TRequestToken expressed as String. 
- */ - public synchronized Collection findReduced( - String reqtoken) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get reduced chunks - String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "WHERE rq.r_token=?"; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, reqtoken); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO! findReduced with request token; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - ReducedBoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedBoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BOL CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedBoLChunkDataTO associated to the - * given griduser, and whose SURLs are contained in the supplied array of - * Strings. 
- */ - public synchronized Collection findReduced( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - /* - * NOTE: we search also on the fromSurl because otherwise we lost all - * request_Bol that have not the uniqueID set because are not yet been - * used by anybody - */ - // get reduced chunks - String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "WHERE rq.r_token=? AND ( rb.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) - + " AND rb.sourceSURL IN " - + makeSurlString(surls) + " ) "; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, requestToken.getValue()); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO! 
findReduced with griduser+surlarray; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - ReducedBoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedBoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BoL CHUNK DAO: {}", e.getMessage(), e); - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedBoLChunkDataTO associated to the - * given griduser, and whose SURLs are contained in the supplied array of - * Strings. - */ - public synchronized Collection findReduced( - String griduser, int[] surlUniqueIDs, String[] surls) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - /* - * NOTE: we search also on the fromSurl because otherwise we lost all - * request_Bol that have not the uniqueID set because are not yet been - * used by anybody - */ - // get reduced chunks - String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "WHERE rq.client_dn=? 
AND ( rb.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) - + " AND rb.sourceSURL IN " - + makeSurlString(surls) + " ) "; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, griduser); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - ReducedBoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedBoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BoL CHUNK DAO: {}", e.getMessage(), e); - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns the number of BoL requests on the given SURL, that are - * in SRM_SUCCESS state. This method is intended to be used by BoLChunkCatalog - * in the isSRM_SUCCESS method invocation. In case of any error, 0 is - * returned. - */ - public synchronized int numberInSRM_SUCCESS(int surlUniqueID) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: numberInSRM_SUCCESS - unable to get a valid connection!"); - return 0; - } - String str = "SELECT COUNT(rb.ID) " - + "FROM status_BoL sb JOIN request_BoL rb " - + "ON (sb.request_BoLID=rb.ID) " - + "WHERE rb.sourceSURL_uniqueID=? AND sb.statusCode=?"; - PreparedStatement find = null; - ResultSet rs = null; - try { - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - /* Prepared statement spares DB-specific String notation! 
*/ - find.setInt(1, surlUniqueID); - logWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO - numberInSRM_SUCCESS method: {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - int numberFileSuccessful = 0; - if (rs.next()) { - numberFileSuccessful = rs.getInt(1); - } - return numberFileSuccessful; - } catch (SQLException e) { - log.error("BoL CHUNK DAO! Unable to determine numberInSRM_SUCCESS! " - + "Returning 0! ", e.getMessage(), e); - return 0; - } finally { - close(rs); - close(find); - } - } - - /** - * Method used in extraordinary situations to signal that data retrieved from - * the DB was malformed and could not be translated into the StoRM object - * model. This method attempts to change the status of the request to - * SRM_FAILURE and record it in the DB. This operation could potentially fail - * because the source of the malformed problems could be a problematic DB; - * indeed, initially only log messages where recorded. Yet it soon became - * clear that the source of malformed data were the clients and/or FE - * recording info in the DB. In these circumstances the client would see its - * request as being in the SRM_IN_PROGRESS state for ever. Hence the pressing - * need to inform it of the encountered problems. - */ - public synchronized void signalMalformedBoLChunk(BoLChunkDataTO auxTO) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: signalMalformedBoLChunk - unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE status_BoL SET statusCode=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE) - + ", explanation=? WHERE request_BoLID=" + auxTO.getPrimaryKey(); - PreparedStatement signal = null; - try { - signal = con.prepareStatement(signalSQL); - logWarnings(con.getWarnings()); - /* Prepared statement spares DB-specific String notation! 
*/ - signal.setString(1, "Request is malformed!"); - logWarnings(signal.getWarnings()); - - log.trace("BoL CHUNK DAO: signalMalformed; {}", signal.toString()); - signal.executeUpdate(); - logWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("BoLChunkDAO! Unable to signal in DB that the request was " - + "malformed! Request: {}; Exception: {}", auxTO.toString(), - e.toString(), e); - } finally { - close(signal); - } - } - - /** - * Method that updates all expired requests in SRM_SUCCESS state, into - * SRM_RELEASED. This is needed when the client forgets to invoke - * srmReleaseFiles(). - * - * @return - */ - public synchronized List transitExpiredSRM_SUCCESS() { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: transitExpiredSRM_SUCCESS - unable to get a valid connection!"); - return new ArrayList(); - } - - HashMap expiredSurlMap = new HashMap(); - String str = null; - PreparedStatement prepStatement = null; - - /* Find all expired surls */ - try { - // start transaction - con.setAutoCommit(false); - - str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM " - + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "WHERE sb.statusCode=?" - + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - prepStatement = con.prepareStatement(str); - prepStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - - ResultSet res = prepStatement.executeQuery(); - logWarnings(prepStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rb.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("BoLChunkDAO! 
unable to build the TSURL from {}: " - + "InvalidTSURLAttributesException ", sourceSURL, e.getMessage()); - } - } - expiredSurlMap.put(sourceSURL, uniqueID); - } - - if (expiredSurlMap.isEmpty()) { - commit(con); - log.trace("BoLChunkDAO! No chunk of BoL request was transited from " - + "SRM_SUCCESS to SRM_RELEASED."); - return new ArrayList(); - } - } catch (SQLException e) { - log.error("BoLChunkDAO! SQLException.", e.getMessage(), e); - rollback(con); - return new ArrayList(); - } finally { - close(prepStatement); - } - - /* Update status of all successful surls to SRM_RELEASED */ - - prepStatement = null; - try { - - str = "UPDATE " - + "status_BoL sb JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "SET sb.statusCode=? " - + "WHERE sb.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - prepStatement = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - prepStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - logWarnings(prepStatement.getWarnings()); - - prepStatement.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(prepStatement.getWarnings()); - - log.trace("BoL CHUNK DAO - transitExpiredSRM_SUCCESS method: {}", - prepStatement.toString()); - - int count = prepStatement.executeUpdate(); - logWarnings(prepStatement.getWarnings()); - - if (count == 0) { - log.trace("BoLChunkDAO! No chunk of BoL request was" - + " transited from SRM_SUCCESS to SRM_RELEASED."); - } else { - log.info("BoLChunkDAO! {} chunks of BoL requests were transited from " - + "SRM_SUCCESS to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("BoLChunkDAO! Unable to transit expired SRM_SUCCESS chunks of " - + "BoL requests, to SRM_RELEASED! 
", e.getMessage(), e); - rollback(con); - return new ArrayList(); - } finally { - close(prepStatement); - } - - /* - * in order to enhance performance here we can check if there is any file - * system with tape (T1D0, T1D1), if there is not any we can skip the - * following - */ - - /* Find all not expired surls from PtG */ - - HashSet pinnedSurlSet = new HashSet(); - try { - // SURLs pinned by BoLs - str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM " - + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "WHERE sb.statusCode=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS) - + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; - - ResultSet res = null; - - prepStatement = con.prepareStatement(str); - res = prepStatement.executeQuery(); - logWarnings(prepStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rb.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("BoLChunkDAO! unable to build the TSURL from {}: " - + "InvalidTSURLAttributesException ", sourceSURL, e.getMessage()); - } - } - pinnedSurlSet.add(uniqueID); - } - - close(prepStatement); - - str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID FROM " - + "request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "WHERE sg.statusCode=?" 
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; - - prepStatement = con.prepareStatement(str); - - prepStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - - res = prepStatement.executeQuery(); - logWarnings(prepStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rg.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("BoLChunkDAO! unable to build the TSURL from {}: " - + "InvalidTSURLAttributesException {}", sourceSURL, e.getMessage()); - } - } - pinnedSurlSet.add(uniqueID); - } - - commit(con); - - } catch (SQLException e) { - log.error("BoLChunkDAO! SQLException. {}", e.getMessage(), e); - rollback(con); - } finally { - close(prepStatement); - } - - /* Remove the Extended Attribute pinned if there is not a valid surl on it */ - ArrayList expiredSurlList = new ArrayList(); - TSURL surl; - for (Entry surlEntry : expiredSurlMap.entrySet()) { - if (!pinnedSurlSet.contains(surlEntry.getValue())) { - try { - surl = TSURL.makeFromStringValidate(surlEntry.getKey()); - } catch (InvalidTSURLAttributesException e) { - log.error("Invalid SURL, cannot release the pin " - + "(Extended Attribute): {}", surlEntry.getKey()); - continue; - } - expiredSurlList.add(surl); - StoRI stori; - try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl); - } catch (Throwable e) { - log.error("Invalid SURL {} cannot release the pin. 
{}: {}", - surlEntry.getKey(), e.getClass().getCanonicalName(), e.getMessage()); - continue; - } - - if (stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { - StormEA.removePinned(stori.getAbsolutePath()); - } - } - } - return expiredSurlList; - } - - /** - * Method that transits chunks in SRM_SUCCESS to SRM_ABORTED, for the given - * SURL: the overall request status of the requests containing that chunk, is - * not changed! The TURL is set to null. Beware, that the chunks may be part - * of requests that have finished, or that still have not finished because - * other chunks are still being processed. - */ - public synchronized void transitSRM_SUCCESStoSRM_ABORTED(int surlUniqueID, - String surl, String explanation) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_ABORTED - unable to get a valid connection!"); - return; - } - String str = "UPDATE " - + "status_BoL sb JOIN request_BoL rb ON sb.request_BoLID=rb.ID " - + "SET sb.statusCode=?, sb.explanation=?, sb.transferURL=NULL " - + "WHERE sb.statusCode=? AND (rb.sourceSURL_uniqueID=? OR rb.targetSURL=?)"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(stmt.getWarnings()); - - stmt.setString(2, explanation); - logWarnings(stmt.getWarnings()); - - stmt.setInt(3, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(stmt.getWarnings()); - - stmt.setInt(4, surlUniqueID); - logWarnings(stmt.getWarnings()); - - stmt.setString(5, surl); - logWarnings(stmt.getWarnings()); - - log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_ABORTED: {}", stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count > 0) { - log.info("BoL CHUNK DAO! 
{} chunks were transited from SRM_SUCCESS " - + "to SRM_ABORTED.", count); - } else { - log.trace("BoL CHUNK DAO! No chunks were transited from SRM_SUCCESS " - + "to SRM_ABORTED."); - } - } catch (SQLException e) { - log.error("BoL CHUNK DAO! Unable to transitSRM_SUCCESStoSRM_ABORTED! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that updates all chunks in SRM_SUCCESS state, into SRM_RELEASED. An - * array of long representing the primary key of each chunk is required: only - * they get the status changed provided their current status is SRM_SUCCESS. - * This method is used during srmReleaseFiles In case of any error nothing - * happens and no exception is thrown, but proper messages get logged. - */ - public synchronized void transitSRM_SUCCESStoSRM_RELEASED(long[] ids) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_RELEASED - unable to get a valid connection!"); - return; - } - String str = "UPDATE status_BoL SET statusCode=? " - + "WHERE statusCode=? AND request_BoLID IN " + makeWhereString(ids); - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - logWarnings(stmt.getWarnings()); - - stmt.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(stmt.getWarnings()); - - log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_RELEASED: {}", - stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("BoL CHUNK DAO! No chunk of BoL request " - + "was transited from SRM_SUCCESS to SRM_RELEASED."); - } else { - log.info("BoL CHUNK DAO! {} chunks of BoL requests were transited " - + "from SRM_SUCCESS to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("BoL CHUNK DAO! Unable to transit chunks from SRM_SUCCESS " - + "to SRM_RELEASED! 
{}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - public synchronized void transitSRM_SUCCESStoSRM_RELEASED(long[] ids, - TRequestToken token) { - - if (token == null) { - transitSRM_SUCCESStoSRM_RELEASED(ids); - } else { - /* - * If a request token has been specified, only the related BoL requests - * have to be released. This is done adding the r.r_token="..." clause in - * the where subquery. - */ - if (!checkConnection()) { - log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_RELEASED - unable to get a valid connection!"); - return; - } - String str = "UPDATE " - + "status_BoL sb JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "SET sb.statusCode=? " + "WHERE sb.statusCode=? AND rq.r_token='" - + token.toString() + "' AND rb.ID IN " + makeWhereString(ids); - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - logWarnings(stmt.getWarnings()); - - stmt.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(stmt.getWarnings()); - - log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_RELEASED: {}", - stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("BoL CHUNK DAO! No chunk of BoL request was " - + "transited from SRM_SUCCESS to SRM_RELEASED."); - } else { - log.info("BoL CHUNK DAO! {} chunks of BoL requests were transited " - + "from SRM_SUCCESS to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("BoL CHUNK DAO! Unable to transit chunks " - + "from SRM_SUCCESS to SRM_RELEASED! 
{}", e.getMessage(), e); - } finally { - close(stmt); - } - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("BoL CHUNK DAO! Unable to close ResultSet! Exception: " + e); - } - } - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("BoL CHUNK DAO! Unable to close Statement {} - Exception: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - private void commit(Connection con) { - - if (con != null) { - try { - con.commit(); - con.setAutoCommit(true); - } catch (SQLException e) { - log.error("BoL, SQL EXception {}", e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to roll back a failed transaction - */ - private void rollback(Connection con) { - - if (con != null) { - try { - con.rollback(); - con.setAutoCommit(true); - log.error("BoL CHUNK DAO: roll back successful!"); - } catch (SQLException e2) { - log.error("BoL CHUNK DAO: roll back failed! {}", e2.getMessage(), e2); - } - } - } - - /** - * Private method that returns the generated ID: it throws an exception in - * case of any problem! - */ - private int extractID(ResultSet rs) throws Exception { - - if (rs == null) { - throw new Exception("BoL CHUNK DAO! Null ResultSet!"); - } - if (rs.next()) { - return rs.getInt(1); - } - log.error("BoL CHUNK DAO! It was not possible to establish " - + "the assigned autoincrement primary key!"); - throw new Exception( - "BoL CHUNK DAO! It was not possible to establish the assigned autoincrement primary key!"); - } - - /** - * Auxiliary private method that logs all SQL warnings. 
- */ - private void logWarnings(SQLWarning w) { - - if (w != null) { - log.debug("BoL CHUNK DAO: {}", w.toString()); - while ((w = w.getNextWarning()) != null) { - log.debug("BoL CHUNK DAO: {}", w.toString()); - } - } - } - - /** - * Method that returns a String containing all IDs. - */ - private String makeWhereString(long[] rowids) { - - StringBuilder sb = new StringBuilder("("); - int n = rowids.length; - for (int i = 0; i < n; i++) { - sb.append(rowids[i]); - if (i < (n - 1)) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surl's IDs. - */ - private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { - - StringBuilder sb = new StringBuilder("("); - for (int i = 0; i < surlUniqueIDs.length; i++) { - if (i > 0) { - sb.append(","); - } - sb.append(surlUniqueIDs[i]); - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surls. - */ - private String makeSurlString(String[] surls) { - - StringBuilder sb = new StringBuilder("("); - int n = surls.length; - - for (int i = 0; i < n; i++) { - - SURL requestedSURL; - - try { - requestedSURL = SURL.makeSURLfromString(surls[i]); - } catch (NamespaceException e) { - log.error(e.getMessage()); - log.debug("Skip '{}' during query creation", surls[i]); - continue; - } - - sb.append("'"); - sb.append(requestedSURL.getNormalFormAsString()); - sb.append("','"); - sb.append(requestedSURL.getQueryFormAsString()); - sb.append("'"); - - if (i < (n - 1)) { - sb.append(","); - } - } - - sb.append(")"); - return sb.toString(); - } - - /** - * Auxiliary method that sets up the connection to the DB, as well as the - * prepared statement. 
- */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - logWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (ClassNotFoundException | SQLException e) { - log.error("BoL CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("BoL CHUNK DAO! Reconnecting to DB! "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that tales down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("BoL CHUNK DAO! 
Exception in takeDownConnection method: {}", - e.getMessage(), e); - } - } - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - doUpdateStatusOnMatchingStatus(requestToken, null, null, - expectedStatusCode, newStatusCode, explanation, true, false, true); - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode) - throws IllegalArgumentException { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0 - || surls.length == 0 || surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken - + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls); - } - doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, null, true, true, false); - } - - public synchronized void doUpdateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation, boolean withRequestToken, boolean withSurls, - boolean withExplanation) throws IllegalArgumentException { - - if ((withRequestToken && requestToken == null) - || (withExplanation && explanation == null) - || (withSurls && (surlUniqueIDs == null || surls == null))) { - throw new IllegalArgumentException( - "Unable to 
perform the doUpdateStatusOnMatchingStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withSurls=" + withSurls - + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls - + " withExplaination=" + withExplanation + " explanation=" - + explanation); - } - if (!checkConnection()) { - log - .error("BOL CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!"); - return; - } - String str = "UPDATE status_BoL sb JOIN (request_BoL rb, request_queue rq) " - + "ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "SET sb.statusCode=? "; - if (withExplanation) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE sb.statusCode=? "; - if (withRequestToken) { - str += " AND " + buildTokenWhereClause(requestToken); - } - if (withSurls) { - str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); - } - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode)); - logWarnings(stmt.getWarnings()); - - stmt - .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode)); - logWarnings(stmt.getWarnings()); - - log.trace("BOL CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("BOL CHUNK DAO! No chunk of BOL request was updated from {} " - + "to {}.", expectedStatusCode, newStatusCode); - } else { - log.debug("BOL CHUNK DAO! {} chunks of BOL requests were updated " - + "from {} to {}.", count, expectedStatusCode, newStatusCode); - } - } catch (SQLException e) { - log.error("BOL CHUNK DAO! 
Unable to updated from {} to {}!", - expectedStatusCode, newStatusCode, e); - } finally { - close(stmt); - } - } - - public Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0 || dn == null) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " dn=" + dn); - } - return find(surlsUniqueIDs, surlsArray, dn, true); - } - - public Collection find(int[] surlsUniqueIDs, - String[] surlsArray) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray); - } - return find(surlsUniqueIDs, surlsArray, null, false); - } - - private synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn, boolean withDn) - throws IllegalArgumentException { - - if ((withDn && dn == null) || surlsUniqueIDs == null - || surlsUniqueIDs.length == 0 || surlsArray == null - || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); - } - if (!checkConnection()) { - log.error("BoL CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get chunks of the request - String str = "SELECT rq.ID, rq.r_token, sb.statusCode, rq.timeStamp, rq.pinLifetime, " - + "rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, " - + "rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, 
d.numOfLevels " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID " - + "WHERE ( rb.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rb.sourceSURL IN " - + makeSurlString(surlsArray) + " )"; - if (withDn) { - str += " AND rq.client_dn=\'" + dn + "\'"; - } - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - List list = new ArrayList(); - - log.trace("BOL CHUNK DAO - find method: {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - BoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - - chunkDataTO = new BoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setDeferredStartTime(rs.getInt("rq.deferredStartTime")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); - chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); - chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BOL CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! 
*/ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - private String buildExpainationSet(String explanation) { - - return " sb.explanation='" + explanation + "' "; - } - - private String buildTokenWhereClause(TRequestToken requestToken) { - - return " rq.r_token='" + requestToken.toString() + "' "; - } - - private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { - - return " ( rb.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rb.sourceSURL IN " - + makeSurlString(surls) + " ) "; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java deleted file mode 100644 index 3503e1c7a..000000000 --- a/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.namespace.model.Protocol; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Timestamp; -import java.util.List; - -/** - * Class that represents a row in the Persistence Layer: this is all raw data - * referring to the BoLChunkData proper, that is, String and primitive types. 
- * - * Each field is initialized with default values as per SRM 2.2 specification: - * protocolList GSIFTP dirOption false status SRM_REQUEST_QUEUED - * - * All other fields are 0 if int, or a white space if String. - * - * @author CNAF - * @version 1.0 - * @date Aug 2009 - */ -public class BoLChunkDataTO { - - /* Database table request_Bol fields BEGIN */ - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private boolean dirOption; // initialised in constructor - private String normalizedStFN = null; - private Integer surlUniqueID = null; - /* Database table request_Get fields END */ - - private String requestToken = " "; - private int lifetime = 0; - private boolean allLevelRecursive; // initialised in constructor - private int numLevel; // initialised in constructor - private List protocolList = null; // initialised in constructor - private long filesize = 0; - private int status; // initialised in constructor - private String errString = " "; - private int deferredStartTime = -1; - private Timestamp timeStamp = null; - - public BoLChunkDataTO() { - - TURLPrefix protocolPreferences = new TURLPrefix(); - protocolPreferences.addProtocol(Protocol.GSIFTP); - this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); - this.status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - this.dirOption = false; - this.allLevelRecursive = false; - this.numLevel = 0; - } - - public boolean getAllLevelRecursive() { - - return allLevelRecursive; - } - - public int getDeferredStartTime() { - - return deferredStartTime; - } - - public boolean getDirOption() { - - return dirOption; - } - - public String getErrString() { - - return errString; - } - - public long getFileSize() { - - return filesize; - } - - public String getFromSURL() { - - return fromSURL; - } - - public int getLifeTime() { - - return lifetime; - } - - public int getNumLevel() { - - return numLevel; - } - - public long 
getPrimaryKey() { - - return primaryKey; - } - - public List getProtocolList() { - - return protocolList; - } - - public String getRequestToken() { - - return requestToken; - } - - public Timestamp getTimeStamp() { - - return timeStamp; - } - - public int getStatus() { - - return status; - } - - public void setAllLevelRecursive(boolean b) { - - allLevelRecursive = b; - } - - public void setDeferredStartTime(int deferredStartTime) { - - this.deferredStartTime = deferredStartTime; - } - - public void setDirOption(boolean b) { - - dirOption = b; - } - - public void setErrString(String s) { - - errString = s; - } - - public void setFileSize(long n) { - - filesize = n; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - public void setLifeTime(int n) { - - lifetime = n; - } - - public void setNumLevel(int n) { - - numLevel = n; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public void setProtocolList(List l) { - - if ((l != null) && (!l.isEmpty())) { - protocolList = l; - } - } - - public void setRequestToken(String s) { - - requestToken = s; - } - - public void setTimeStamp(Timestamp timeStamp) { - - this.timeStamp = timeStamp; - } - - public void setStatus(int n) { - - status = n; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param surlUniqueID - * the sURLUniqueID to set - */ - public void setSurlUniqueID(Integer surlUniqueID) { - - this.surlUniqueID = surlUniqueID; - } - - /** - * @return the sURLUniqueID - */ - public Integer sulrUniqueID() { - - return surlUniqueID; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(requestToken); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - 
sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(lifetime); - sb.append(" "); - sb.append(dirOption); - sb.append(" "); - sb.append(allLevelRecursive); - sb.append(" "); - sb.append(numLevel); - sb.append(" "); - sb.append(protocolList); - sb.append(" "); - sb.append(filesize); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/BoLData.java b/src/main/java/it/grid/storm/catalogs/BoLData.java deleted file mode 100644 index a96bd1d6f..000000000 --- a/src/main/java/it/grid/storm/catalogs/BoLData.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.srm.types.TTURL; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a BringOnLineChunkData, that is part of a multifile - * BringOnLine srm request. 
It contains data about: the requestToken, the - * fromSURL, the requested lifeTime of pinning, the TDirOption which tells - * whether the requested SURL is a directory and if it must be recursed at all - * levels, as well as the desired number of levels to recurse, the desired - * transferProtocols in order of preference, the fileSize, and the transferURL - * for the supplied SURL. - * - * @author CNAF - * @version 1.0 - * @date Aug 2009 - */ -public class BoLData extends AnonymousFileTransferData { - - private static final Logger log = LoggerFactory.getLogger(BoLData.class); - - /** - * requested lifetime of TURL: it is the pin time! - */ - private TLifeTimeInSeconds lifeTime; - - /** - * specifies if the request regards a directory and related info - */ - private TDirOption dirOption; - - /** - * size of file - */ - private TSizeInBytes fileSize; - - /** - * how many seconds to wait before to make the lifeTime start consuming - */ - private int deferredStartTime = 0; - - public BoLData(TSURL fromSURL, TLifeTimeInSeconds lifeTime, - TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize, - TReturnStatus status, TTURL transferURL, int deferredStartTime) - throws InvalidFileTransferDataAttributesException, - InvalidBoLDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(fromSURL, desiredProtocols, status, transferURL); - if (lifeTime == null || dirOption == null || fileSize == null) { - throw new InvalidBoLDataAttributesException(fromSURL, lifeTime, - dirOption, desiredProtocols, fileSize, status, transferURL); - } - this.lifeTime = lifeTime; - this.dirOption = dirOption; - this.fileSize = fileSize; - this.deferredStartTime = deferredStartTime; - } - - /** - * Method that sets the status of this request to SRM_FILE_PINNED; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. 
- */ - public void changeStatusSRM_FILE_PINNED(String explanation) { - - setStatus(TStatusCode.SRM_FILE_PINNED, explanation); - } - - public int getDeferredStartTime() { - - return deferredStartTime; - } - - /** - * Method that returns the dirOption specified in the srm request. - */ - public TDirOption getDirOption() { - - return dirOption; - } - - /** - * Method that returns the file size for this chunk of the srm request. - */ - public TSizeInBytes getFileSize() { - - return fileSize; - } - - /** - * Method that returns the requested pin life time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds getLifeTime() { - - return lifeTime; - } - - public void setDeferredStartTime(int deferredStartTime) { - - this.deferredStartTime = deferredStartTime; - } - - /** - * Method used to set the size of the file corresponding to the requested - * SURL. If the supplied TSizeInByte is null, then nothing gets set! - */ - public void setFileSize(TSizeInBytes size) { - - if (size != null) { - fileSize = size; - } - } - - public void setLifeTime(long lifeTimeInSeconds) { - - TLifeTimeInSeconds lifeTime; - try { - lifeTime = TLifeTimeInSeconds.make(lifeTimeInSeconds, TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - log.error(e.getMessage(), e); - return; - } - - this.lifeTime = lifeTime; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/BoLPersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/BoLPersistentChunkData.java deleted file mode 100644 index 651686cba..000000000 --- a/src/main/java/it/grid/storm/catalogs/BoLPersistentChunkData.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TTURL; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a BringOnLineChunkData, that is part of a multifile - * BringOnLine srm request. It contains data about: the requestToken, the - * fromSURL, the requested lifeTime of pinning, the TDirOption which tells - * whether the requested SURL is a directory and if it must be recursed at all - * levels, as well as the desired number of levels to recurse, the desired - * transferProtocols in order of preference, the fileSize, and the transferURL - * for the supplied SURL. 
- * - * @author CNAF - * @version 1.0 - * @date Aug 2009 - */ -public class BoLPersistentChunkData extends BoLData implements - PersistentChunkData { - - private static final Logger log = LoggerFactory - .getLogger(BoLPersistentChunkData.class); - - /** - * long representing the primary key for the persistence layer, in the - * status_Put table - */ - private long primaryKey = -1; - - /** - * This is the requestToken of the multifile srm request to which this chunk - * belongs - */ - private final TRequestToken requestToken; - - public BoLPersistentChunkData(TRequestToken requestToken, TSURL fromSURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix desiredProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL, int deferredStartTime) - throws InvalidBoLPersistentChunkDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidBoLDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, - transferURL, deferredStartTime); - if (requestToken == null) { - log.debug("BoLPersistentChunkData: requestToken is null!"); - throw new InvalidBoLPersistentChunkDataAttributesException(requestToken, - fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, - transferURL); - } - this.requestToken = requestToken; - } - - /** - * Method that returns the requestToken of the srm request to which this chunk - * belongs. - */ - public TRequestToken getRequestToken() { - - return requestToken; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - @Override - public long getPrimaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! 
- */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - @Override - public long getIdentifier() { - - return getPrimaryKey(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ChunkDAOUtils.java b/src/main/java/it/grid/storm/catalogs/ChunkDAOUtils.java index d7775310f..e8be1daf4 100644 --- a/src/main/java/it/grid/storm/catalogs/ChunkDAOUtils.java +++ b/src/main/java/it/grid/storm/catalogs/ChunkDAOUtils.java @@ -1,30 +1,9 @@ package it.grid.storm.catalogs; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.SQLWarning; - public class ChunkDAOUtils { - private static final Logger log = LoggerFactory.getLogger(ChunkDAOUtils.class); - protected ChunkDAOUtils() {} - public static void printWarnings(SQLWarning warning) { - - if (warning != null) { - log.warn("---Warning---"); - - while (warning != null) { - log.warn("Message: {}", warning.getMessage()); - log.warn("SQLState: {}", warning.getSQLState()); - log.warn("Vendor error code: {}", warning.getErrorCode()); - warning = warning.getNextWarning(); - } - } - } - public static String buildInClauseForArray(int size) { StringBuilder b = new StringBuilder(); for (int i=1; i<=size; i++) { diff --git a/src/main/java/it/grid/storm/catalogs/ChunkData.java b/src/main/java/it/grid/storm/catalogs/ChunkData.java deleted file mode 100644 index c79c4f406..000000000 --- a/src/main/java/it/grid/storm/catalogs/ChunkData.java +++ /dev/null @@ -1,11 +0,0 @@ -package it.grid.storm.catalogs; - -public interface ChunkData extends RequestData { - - /** - * Method that returns the primary key in persistence, associated with This - * Chunk. 
- */ - public long getIdentifier(); - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java deleted file mode 100644 index e09798ce8..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java +++ /dev/null @@ -1,489 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.griduser.GridUserInterface; -// import it.grid.storm.namespace.SurlStatusStore; -import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TStatusCode; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class that represents StoRMs CopyChunkCatalog: it collects CopyChunkData 
and - * provides methods for looking up a CopyChunkData based on TRequestToken, as - * well as for updating an existing one. - * - * @author EGRID - ICTP Trieste - * @date september, 2005 - * @version 2.0 - */ -public class CopyChunkCatalog { - - private static final Logger log = LoggerFactory - .getLogger(CopyChunkCatalog.class); - - /* only instance of CopyChunkCatalog present in StoRM! */ - private static final CopyChunkCatalog cat = new CopyChunkCatalog(); - /* WARNING!!! TO BE MODIFIED WITH FACTORY!!! */ - private CopyChunkDAO dao = CopyChunkDAO.getInstance(); - - private CopyChunkCatalog() { - - } - - /** - * Method that returns the only instance of PtPChunkCatalog available. - */ - public static CopyChunkCatalog getInstance() { - - return cat; - } - - /** - * Method used to update into Persistence a retrieved CopyChunkData. In case - * any error occurs, the operation does not proceed and no Exception is - * thrown. - * - * Beware that the only fields updated into persistence are the StatusCode and - * the errorString. 
- */ - synchronized public void update(CopyPersistentChunkData cd) { - - CopyChunkDataTO to = new CopyChunkDataTO(); - /* primary key needed by DAO Object */ - to.setPrimaryKey(cd.getPrimaryKey()); - to.setLifeTime(FileLifetimeConverter.getInstance().toDB( - cd.getLifetime().value())); - to.setStatus(StatusCodeConverter.getInstance().toDB( - cd.getStatus().getStatusCode())); - to.setErrString(cd.getStatus().getExplanation()); - to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB( - cd.getFileStorageType())); - to.setOverwriteOption(OverwriteModeConverter.getInstance().toDB( - cd.getOverwriteOption())); - to.setNormalizedSourceStFN(cd.getSURL().normalizedStFN()); - to.setSourceSurlUniqueID(new Integer(cd.getSURL().uniqueId())); - to.setNormalizedTargetStFN(cd.getDestinationSURL().normalizedStFN()); - to.setTargetSurlUniqueID(new Integer(cd.getDestinationSURL().uniqueId())); - - dao.update(to); - } - - /** - * Method that returns a Collection of CopyChunkData Objects matching the - * supplied TRequestToken. - * - * If any of the data associated to the TRequestToken is not well formed and - * so does not allow a CopyChunkData Object to be created, then that part of - * the request is dropped and gets logged, and the processing continues with - * the next part. All valid chunks get returned: the others get dropped. - * - * If there are no chunks to process then an empty Collection is returned, and - * a messagge gets logged. 
- */ - synchronized public Collection lookup( - TRequestToken rt) { - - Collection chunkDataTOs = dao.find(rt); - log.debug("COPY CHUNK CATALOG: retrieved data {}", chunkDataTOs); - return buildChunkDataList(chunkDataTOs, rt); - } - - private Collection buildChunkDataList( - Collection chunkDataTOs, TRequestToken rt) { - - ArrayList list = new ArrayList(); - CopyPersistentChunkData chunk; - for (CopyChunkDataTO chunkTO : chunkDataTOs) { - chunk = makeOne(chunkTO, rt); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(completeTO(chunkTO, chunk)); - } catch (InvalidReducedCopyChunkDataAttributesException e) { - log.warn("COPY CHUNK CATALOG! unable to add missing informations on " - + "DB to the request: {}", e.getMessage()); - } - } - log.debug("COPY CHUNK CATALOG: returning {}\n\n", list); - return list; - } - - private Collection buildChunkDataList( - Collection chunkDataTOs) { - - ArrayList list = new ArrayList(); - CopyPersistentChunkData chunk; - for (CopyChunkDataTO chunkTO : chunkDataTOs) { - chunk = makeOne(chunkTO); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(completeTO(chunkTO, chunk)); - } catch (InvalidReducedCopyChunkDataAttributesException e) { - log.warn("COPY CHUNK CATALOG! 
unable to add missing informations on DB " - + "to the request: {}", e.getMessage()); - } - } - log.debug("COPY CHUNK CATALOG: returning {}\n\n", list); - return list; - } - - public Collection lookupCopyChunkData( - TRequestToken requestToken, Collection surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOs = dao.find(requestToken, - surlsUniqueIDs, surlsArray); - return buildChunkDataList(chunkDataTOs, requestToken); - } - - public Collection lookupCopyChunkData(TSURL surl, - GridUserInterface user) { - - return lookupCopyChunkData(Arrays.asList(new TSURL[] { surl }), user); - } - - public Collection lookupCopyChunkData(TSURL surl) { - - return lookupCopyChunkData(Arrays.asList(new TSURL[] { surl })); - } - - private Collection lookupCopyChunkData( - List surls, GridUserInterface user) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOs = dao.find(surlsUniqueIDs, - surlsArray, user.getDn()); - return buildChunkDataList(chunkDataTOs); - } - - public Collection lookupCopyChunkData( - List surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOs = dao.find(surlsUniqueIDs, - surlsArray); - return buildChunkDataList(chunkDataTOs); - } - - private CopyPersistentChunkData makeOne(CopyChunkDataTO chunkTO) { - - try { - return makeOne(chunkTO, - new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp())); - } 
catch (InvalidTRequestTokenAttributesException e) { - throw new IllegalStateException( - "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " - + e); - } - } - - /** - * Generates a CopyChunkData from the received CopyChunkDataTO - * - * @param chunkDataTO - * @param rt - * @return - */ - private CopyPersistentChunkData makeOne(CopyChunkDataTO chunkDataTO, - TRequestToken rt) { - - StringBuilder errorSb = new StringBuilder(); - // fromSURL - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (chunkDataTO.normalizedSourceStFN() != null) { - fromSURL.setNormalizedStFN(chunkDataTO.normalizedSourceStFN()); - } - if (chunkDataTO.sourceSurlUniqueID() != null) { - fromSURL.setUniqueID(chunkDataTO.sourceSurlUniqueID().intValue()); - } - // toSURL - TSURL toSURL = null; - try { - toSURL = TSURL.makeFromStringValidate(chunkDataTO.toSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (chunkDataTO.normalizedTargetStFN() != null) { - toSURL.setNormalizedStFN(chunkDataTO.normalizedTargetStFN()); - } - if (chunkDataTO.targetSurlUniqueID() != null) { - toSURL.setUniqueID(chunkDataTO.targetSurlUniqueID().intValue()); - } - // lifeTime - TLifeTimeInSeconds lifeTime = null; - try { - lifeTime = TLifeTimeInSeconds.make(FileLifetimeConverter.getInstance() - .toStoRM(chunkDataTO.lifeTime()), TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // fileStorageType - TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance() - .toSTORM(chunkDataTO.fileStorageType()); - if (fileStorageType == TFileStorageType.EMPTY) { - log.error("\nTFileStorageType could not be translated from its String " - + "representation! String: {}", chunkDataTO.fileStorageType()); - // fail creation of PtPChunk! - fileStorageType = null; - } - // spaceToken! 
- // - // WARNING! Although this field is in common between StoRM and DPM, a - // converter is still used - // because DPM logic for NULL/EMPTY is not known. StoRM model does not - // allow for null, so it must - // be taken care of! - TSpaceToken spaceToken = null; - TSpaceToken emptyToken = TSpaceToken.makeEmpty(); - // convert empty string representation of DPM into StoRM representation; - String spaceTokenTranslation = SpaceTokenStringConverter.getInstance() - .toStoRM(chunkDataTO.spaceToken()); - if (emptyToken.toString().equals(spaceTokenTranslation)) { - spaceToken = emptyToken; - } else { - try { - spaceToken = TSpaceToken.make(spaceTokenTranslation); - } catch (InvalidTSpaceTokenAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - } - // overwriteOption! - TOverwriteMode globalOverwriteOption = OverwriteModeConverter.getInstance() - .toSTORM(chunkDataTO.overwriteOption()); - if (globalOverwriteOption == TOverwriteMode.EMPTY) { - errorSb.append("\nTOverwriteMode could not be " - + "translated from its String representation! String: " - + chunkDataTO.overwriteOption()); - globalOverwriteOption = null; - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - chunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + chunkDataTO.status()); - } else { - status = new TReturnStatus(code, chunkDataTO.errString()); - } - // make CopyChunkData - CopyPersistentChunkData aux = null; - try { - aux = new CopyPersistentChunkData(rt, fromSURL, toSURL, lifeTime, - fileStorageType, spaceToken, globalOverwriteOption, status); - aux.setPrimaryKey(chunkDataTO.primaryKey()); - } catch (InvalidSurlRequestDataAttributesException e) { - dao.signalMalformedCopyChunk(chunkDataTO); - log.warn("COPY CHUNK CATALOG! Retrieved malformed Copy" - + " chunk data from persistence. 
Dropping chunk from request: {}", rt); - log.warn(e.getMessage()); - log.warn(errorSb.toString()); - } - // end... - return aux; - } - - /** - * - * Adds to the received CopyChunkDataTO the normalized StFN and the SURL - * unique ID taken from the CopyChunkData - * - * @param chunkTO - * @param chunk - */ - private void completeTO(ReducedCopyChunkDataTO chunkTO, - final ReducedCopyChunkData chunk) { - - chunkTO.setNormalizedSourceStFN(chunk.fromSURL().normalizedStFN()); - chunkTO.setSourceSurlUniqueID(new Integer(chunk.fromSURL().uniqueId())); - chunkTO.setNormalizedTargetStFN(chunk.toSURL().normalizedStFN()); - chunkTO.setTargetSurlUniqueID(new Integer(chunk.toSURL().uniqueId())); - } - - /** - * - * Creates a ReducedCopyChunkDataTO from the received CopyChunkDataTO and - * completes it with the normalized StFN and the SURL unique ID taken from the - * PtGChunkData - * - * @param chunkTO - * @param chunk - * @return - * @throws InvalidReducedCopyChunkDataAttributesException - */ - private ReducedCopyChunkDataTO completeTO(CopyChunkDataTO chunkTO, - final CopyPersistentChunkData chunk) - throws InvalidReducedCopyChunkDataAttributesException { - - ReducedCopyChunkDataTO reducedChunkTO = this.reduce(chunkTO); - this.completeTO(reducedChunkTO, this.reduce(chunk)); - return reducedChunkTO; - } - - /** - * Creates a ReducedCopyChunkData from the data contained in the received - * CopyChunkData - * - * @param chunk - * @return - * @throws InvalidReducedPtGChunkDataAttributesException - */ - private ReducedCopyChunkData reduce(CopyPersistentChunkData chunk) - throws InvalidReducedCopyChunkDataAttributesException { - - ReducedCopyChunkData reducedChunk = new ReducedCopyChunkData( - chunk.getSURL(), chunk.getDestinationSURL(), chunk.getStatus()); - reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); - return reducedChunk; - } - - /** - * Creates a ReducedCopyChunkDataTO from the data contained in the received - * CopyChunkDataTO - * - * @param chunkTO - * @return - */ - 
private ReducedCopyChunkDataTO reduce(CopyChunkDataTO chunkTO) { - - ReducedCopyChunkDataTO reducedChunkTO = new ReducedCopyChunkDataTO(); - reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); - reducedChunkTO.setFromSURL(chunkTO.fromSURL()); - reducedChunkTO.setNormalizedSourceStFN(chunkTO.normalizedSourceStFN()); - reducedChunkTO.setSourceSurlUniqueID(chunkTO.sourceSurlUniqueID()); - reducedChunkTO.setToSURL(chunkTO.toSURL()); - reducedChunkTO.setNormalizedTargetStFN(chunkTO.normalizedTargetStFN()); - reducedChunkTO.setTargetSurlUniqueID(chunkTO.targetSurlUniqueID()); - reducedChunkTO.setStatus(chunkTO.status()); - reducedChunkTO.setErrString(chunkTO.errString()); - return reducedChunkTO; - } - - /** - * Checks if the received CopyChunkDataTO contains the fields not set by the - * front end but required - * - * @param chunkTO - * @return - */ - private boolean isComplete(CopyChunkDataTO chunkTO) { - - return (chunkTO.normalizedSourceStFN() != null) - && (chunkTO.sourceSurlUniqueID() != null && chunkTO - .normalizedTargetStFN() != null) - && (chunkTO.targetSurlUniqueID() != null); - } - - /** - * Checks if the received ReducedPtGChunkDataTO contains the fields not set by - * the front end but required - * - * @param reducedChunkTO - * @return - */ - @SuppressWarnings("unused") - private boolean isComplete(ReducedCopyChunkDataTO reducedChunkTO) { - - return (reducedChunkTO.normalizedSourceStFN() != null) - && (reducedChunkTO.sourceSurlUniqueID() != null && reducedChunkTO - .normalizedTargetStFN() != null) - && (reducedChunkTO.targetSurlUniqueID() != null); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, - newStatusCode, explanation); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - List surlList, TStatusCode expectedStatusCode, - TStatusCode newStatusCode) { - - 
int[] surlsUniqueIDs = new int[surlList.size()]; - String[] surls = new String[surlList.size()]; - int index = 0; - for (TSURL tsurl : surlList) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java b/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java deleted file mode 100644 index 102af5197..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java +++ /dev/null @@ -1,786 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.config.Configuration; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DAO class for PtPChunkCatalog. This DAO is specifically designed to connect - * to a MySQL DB. 
The raw data found in those tables is pre-treated in order to - * turn it into the Object Model of StoRM. See Method comments for further info. - * - * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the - * object model. - * - * @author EGRID - ICTP Trieste - * @version 2.0 - * @date September 2005 - */ -public class CopyChunkDAO { - - private static final Logger log = LoggerFactory.getLogger(CopyChunkDAO.class); - - /* String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /* String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getStormDbURL(); - /* String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /* String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - - /* Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - /* boolean that tells whether reconnection is needed because of MySQL bug! */ - private boolean reconnect = false; - - /* Singleton instance */ - private final static CopyChunkDAO dao = new CopyChunkDAO(); - - /* timer thread that will run a task to alert when reconnecting is necessary! */ - private Timer clock = null; - /* - * timer task that will update the boolean signaling that a reconnection is - * needed! 
- */ - private TimerTask clockTask = null; - /* milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance().getDBReconnectPeriod() * 1000; - /* initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - - private CopyChunkDAO() { - - setUpConnection(); - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the CopyChunkDAO. - */ - public static CopyChunkDAO getInstance() { - - return dao; - } - - /** - * Method used to save the changes made to a retrieved CopyChunkDataTO, back - * into the MySQL DB. - * - * Only statusCode and explanation, of status_Copy table get written to the - * DB. Likewise for fileLifetime of request_queue table. - * - * In case of any error, an error messagge gets logged but no exception is - * thrown. - */ - public synchronized void update(CopyChunkDataTO to) { - - if (!checkConnection()) { - log.error("COPY CHUNK DAO: update - unable to get a valid connection!"); - return; - } - PreparedStatement updateFileReq = null; - try { - // ready updateFileReq... - updateFileReq = con - .prepareStatement("UPDATE request_queue rq JOIN (status_Copy sc, request_Copy rc) " - + "ON (rq.ID=rc.request_queueID AND sc.request_CopyID=rc.ID) " - + "SET sc.statusCode=?, sc.explanation=?, rq.fileLifetime=?, rq.config_FileStorageTypeID=?, rq.config_OverwriteID=?, " - + "rc.normalized_sourceSURL_StFN=?, rc.sourceSURL_uniqueID=?, rc.normalized_targetSURL_StFN=?, rc.targetSURL_uniqueID=? 
" - + "WHERE rc.ID=?"); - logWarnings(con.getWarnings()); - - updateFileReq.setInt(1, to.status()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(2, to.errString()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(3, to.lifeTime()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(4, to.fileStorageType()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(5, to.overwriteOption()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(6, to.normalizedSourceStFN()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(7, to.sourceSurlUniqueID()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(8, to.normalizedTargetStFN()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(9, to.targetSurlUniqueID()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setLong(10, to.primaryKey()); - logWarnings(updateFileReq.getWarnings()); - - // run updateFileReq - updateFileReq.executeUpdate(); - logWarnings(updateFileReq.getWarnings()); - } catch (SQLException e) { - log.error("COPY CHUNK DAO: Unable to complete update! {}", - e.getMessage(), e); - } finally { - close(updateFileReq); - } - } - - /** - * Updates the request_Get represented by the received ReducedPtGChunkDataTO - * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID - * - * @param chunkTO - */ - public synchronized void updateIncomplete(ReducedCopyChunkDataTO chunkTO) { - - if (!checkConnection()) { - log - .error("COPY CHUNK DAO: updateIncomplete - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_Copy SET normalized_sourceSURL_StFN=?, sourceSURL_uniqueID=?, normalized_targetSURL_StFN=?, targetSURL_uniqueID=? 
" - + "WHERE ID=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - stmt.setString(1, chunkTO.normalizedSourceStFN()); - logWarnings(stmt.getWarnings()); - - stmt.setInt(2, chunkTO.sourceSurlUniqueID()); - logWarnings(stmt.getWarnings()); - - stmt.setString(3, chunkTO.normalizedTargetStFN()); - logWarnings(stmt.getWarnings()); - - stmt.setInt(4, chunkTO.targetSurlUniqueID()); - logWarnings(stmt.getWarnings()); - - stmt.setLong(5, chunkTO.primaryKey()); - logWarnings(stmt.getWarnings()); - - log.trace("COPY CHUNK DAO - update incomplete: {}", stmt.toString()); - stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - } catch (SQLException e) { - log.error("COPY CHUNK DAO: Unable to complete update incomplete! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that queries the MySQL DB to find all entries matching the supplied - * TRequestToken. The Collection contains the corresponding CopyChunkDataTO - * objects. - * - * A complex query establishes all chunks associated with the request token, - * by properly joining request_queue, request_Copy and status_Copy. The - * considered fields are: - * - * (1) From status_Copy: the ID field which becomes the TOs primary key, and - * statusCode. - * - * (2) From request_Copy: targetSURL and sourceSURL. - * - * (3) From request_queue: fileLifetime, config_FileStorageTypeID, s_token, - * config_OverwriteID. - * - * In case of any error, a log gets written and an empty collection is - * returned. No exception is returned. - * - * NOTE! Chunks in SRM_ABORTED status are NOT returned! 
- */ - public synchronized Collection find( - TRequestToken requestToken) { - - if (!checkConnection()) { - log.error("COPY CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - /* get chunks of the request */ - str = "SELECT rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) " - + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) " - + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID " - + "WHERE rq.r_token=? AND sc.statusCode<>?"; - - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, strToken); - logWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(find.getWarnings()); - - log.debug("COPY CHUNK DAO: find method; " + find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - CopyChunkDataTO chunkDataTO; - while (rs.next()) { - chunkDataTO = new CopyChunkDataTO(); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setPrimaryKey(rs.getLong("rc.ID")); - chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL")); - chunkDataTO.setNormalizedSourceStFN(rs - 
.getString("rc.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setToSURL(rs.getString("rc.targetSURL")); - chunkDataTO.setNormalizedTargetStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID)); - } - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("COPY CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - - } - - public synchronized Collection find( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls) { - - if (!checkConnection()) { - log.error("COPY CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - /* get chunks of the request */ - str = "SELECT rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) " - + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) " - + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID " - + "WHERE rq.r_token=? 
AND ( rc.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) - + " AND rc.sourceSURL IN " - + makeSurlString(surls) + " ) "; - - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, strToken); - logWarnings(find.getWarnings()); - - log.debug("COPY CHUNK DAO: find method; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - CopyChunkDataTO chunkDataTO; - while (rs.next()) { - chunkDataTO = new CopyChunkDataTO(); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setPrimaryKey(rs.getLong("rc.ID")); - chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL")); - chunkDataTO.setNormalizedSourceStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setToSURL(rs.getString("rc.targetSURL")); - chunkDataTO.setNormalizedTargetStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID)); - } - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("COPY CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - - } - - /** - * Method used in extraordinary situations to signal that data retrieved from - * the DB was malformed and could not be translated into the StoRM object - * model. 
- * - * This method attempts to change the status of the request to SRM_FAILURE and - * record it in the DB. - * - * This operation could potentially fail because the source of the malformed - * problems could be a problematic DB; indeed, initially only log messagges - * where recorded. - * - * Yet it soon became clear that the source of malformed data were the clients - * and/or FE recording info in the DB. In these circumstances the client would - * its request as being in the SRM_IN_PROGRESS state for ever. Hence the - * pressing need to inform it of the encountered problems. - */ - public synchronized void signalMalformedCopyChunk(CopyChunkDataTO auxTO) { - - if (!checkConnection()) { - log - .error("COPY CHUNK DAO: signalMalformedCopyChunk - unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE status_Copy SET statusCode=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE) - + ", explanation=? WHERE request_CopyID=" + auxTO.primaryKey(); - - PreparedStatement signal = null; - try { - /* update storm_put_filereq */ - signal = con.prepareStatement(signalSQL); - logWarnings(con.getWarnings()); - - /* Prepared statement spares DB-specific String notation! */ - signal.setString(1, "Request is malformed!"); - logWarnings(signal.getWarnings()); - - signal.executeUpdate(); - logWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("CopyChunkDAO! Unable to signal in DB that the request was " - + "malformed! Request: {}; Error: {}", auxTO.toString(), - e.getMessage(), e); - } finally { - close(signal); - } - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("COPY CHUNK DAO! 
Unable to close Statement {} - Error: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("COPY CHUNK DAO! Unable to close ResultSet! Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary private method that logs all SQL warnings. - */ - private void logWarnings(SQLWarning w) { - - if (w != null) { - log.debug("COPY CHUNK DAO: {}", w.toString()); - while ((w = w.getNextWarning()) != null) { - log.debug("COPY CHUNK DAO: {}", w.toString()); - } - } - } - - /** - * Auxiliary method that sets up the conenction to the DB. - */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - logWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (SQLException | ClassNotFoundException e) { - log.error("COPY CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private synchronized boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("COPY CHUNK DAO! Reconnecting to DB! "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that takes down a conenctin to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("COPY CHUNK DAO! 
Exception in takeDownConnection method: {}", - e.getMessage(), e); - } - } - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - doUpdateStatusOnMatchingStatus(requestToken, null, null, - expectedStatusCode, newStatusCode, explanation, true, false, true); - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode) - throws IllegalArgumentException { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0 - || surls.length == 0 || surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken - + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls); - } - doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, null, true, true, false); - } - - public synchronized void doUpdateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation, boolean withRequestToken, boolean withSurls, - boolean withExplanation) throws IllegalArgumentException { - - if ((withRequestToken && requestToken == null) - || (withExplanation && explanation == null) - || (withSurls && (surlUniqueIDs == null || surls == null))) { - throw new IllegalArgumentException( - "Unable to 
perform the doUpdateStatusOnMatchingStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withSurls=" + withSurls - + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls - + " withExplaination=" + withExplanation + " explanation=" - + explanation); - } - if (!checkConnection()) { - log - .error("COPY CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_queue rq JOIN (status_Copy sc, request_Copy rc) " - + "ON (rq.ID=rc.request_queueID AND sc.request_CopyID=rc.ID) " - + "SET sc.statusCode=? "; - if (withExplanation) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE sc.statusCode=? "; - if (withRequestToken) { - str += " AND " + buildTokenWhereClause(requestToken); - } - if (withSurls) { - str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); - } - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode)); - logWarnings(stmt.getWarnings()); - - stmt - .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode)); - logWarnings(stmt.getWarnings()); - - log.trace("COPY CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("COPY CHUNK DAO! No chunk of COPY request was updated " - + "from {} to {}.", expectedStatusCode, newStatusCode); - } else { - log.debug("COPY CHUNK DAO! {} chunks of COPY requests were updated " - + "from {} to {}.", count, expectedStatusCode, newStatusCode); - } - } catch (SQLException e) { - log.error("COPY CHUNK DAO! Unable to updated from {} to {}! {}", - expectedStatusCode, newStatusCode, e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that returns a String containing all Surl's IDs. 
- */ - private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { - - StringBuilder sb = new StringBuilder("("); - for (int i = 0; i < surlUniqueIDs.length; i++) { - if (i > 0) { - sb.append(","); - } - sb.append(surlUniqueIDs[i]); - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surls. - */ - private String makeSurlString(String[] surls) { - - StringBuilder sb = new StringBuilder("("); - int n = surls.length; - for (int i = 0; i < n; i++) { - sb.append("'"); - sb.append(surls[i]); - sb.append("'"); - if (i < (n - 1)) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - public synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0 || dn == null) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " dn=" + dn); - } - return find(surlsUniqueIDs, surlsArray, dn, true); - } - - public synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray); - } - return find(surlsUniqueIDs, surlsArray, null, false); - } - - private synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn, boolean withDn) - throws IllegalArgumentException { - - if ((withDn && dn == null) || surlsUniqueIDs == null - || surlsUniqueIDs.length == 0 || surlsArray == null - || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: 
surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); - } - if (!checkConnection()) { - log.error("COPY CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - String str = "SELECT rq.r_token, rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, " - + "rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, " - + "rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, " - + "d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) " - + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) " - + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID " - + "WHERE ( rc.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rc.sourceSURL IN " - + makeSurlString(surlsArray) + " )"; - if (withDn) { - str += " AND rq.client_dn=\'" + dn + "\'"; - } - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - List list = new ArrayList(); - - log.trace("COPY CHUNK DAO - find method: {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - CopyChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new CopyChunkDataTO(); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setPrimaryKey(rs.getLong("rc.ID")); - chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL")); - chunkDataTO.setNormalizedSourceStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - int uniqueID = 
rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setToSURL(rs.getString("rc.targetSURL")); - chunkDataTO.setNormalizedTargetStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID)); - } - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("COPY CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - private String buildExpainationSet(String explanation) { - - return " sc.explanation='" + explanation + "' "; - } - - private String buildTokenWhereClause(TRequestToken requestToken) { - - return " rq.r_token='" + requestToken.toString() + "' "; - } - - private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { - - return " ( rc.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rc.sourceSURL IN " - + makeSurlString(surls) + " ) "; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java deleted file mode 100644 index 41e197eb1..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java +++ /dev/null @@ -1,290 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import java.sql.Timestamp; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TStatusCode; - -/** - * Class that represents a row in the Persistence Layer: this is all raw data - * referring to the CopyChunkData proper, that is, String and primitive types. - * - * Each field is initialized with default values as per SRM 2.2 specification: - * fileStorageType VOLATILE overwriteMode NEVER status SRM_REQUEST_QUEUED - * - * All other fields are 0 if int, or a white space if String. - * - * @author EGRID ICTP - * @version 2.0 - * @date Semptember 2005 - */ -public class CopyChunkDataTO { - - /* Database table request_Get fields BEGIN */ - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private String toSURL = " "; - private String normalizedSourceStFN = null; - private Integer sourceSurlUniqueID = null; - private String normalizedTargetStFN = null; - private Integer targetSurlUniqueID = null; - /* Database table request_Get fields END */ - - private String requestToken = " "; - private int lifetime = 0; - private String fileStorageType = null; // initialised in constructor - private String spaceToken = " "; - private String overwriteOption = null; // initialised in constructor - private int status; // initialised in constructor - private String errString = " "; - private Timestamp timeStamp = null; - - public CopyChunkDataTO() { - - fileStorageType = FileStorageTypeConverter.getInstance().toDB( - TFileStorageType.VOLATILE); - overwriteOption = OverwriteModeConverter.getInstance().toDB( - TOverwriteMode.NEVER); - status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - } - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { 
- - primaryKey = n; - } - - public String requestToken() { - - return requestToken; - } - - public void setRequestToken(String s) { - - requestToken = s; - } - - public Timestamp timeStamp() { - - return timeStamp; - } - - public void setTimeStamp(Timestamp timeStamp) { - - this.timeStamp = timeStamp; - } - - public String fromSURL() { - - return fromSURL; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - /** - * @return the normalizedStFN - */ - public String normalizedSourceStFN() { - - return normalizedSourceStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedSourceStFN(String normalizedStFN) { - - this.normalizedSourceStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer sourceSurlUniqueID() { - - return sourceSurlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setSourceSurlUniqueID(Integer surlUniqueID) { - - this.sourceSurlUniqueID = surlUniqueID; - } - - /** - * @return the normalizedStFN - */ - public String normalizedTargetStFN() { - - return normalizedTargetStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedTargetStFN(String normalizedStFN) { - - this.normalizedTargetStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer targetSurlUniqueID() { - - return targetSurlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setTargetSurlUniqueID(Integer surlUniqueID) { - - this.targetSurlUniqueID = surlUniqueID; - } - - public String toSURL() { - - return toSURL; - } - - public void setToSURL(String s) { - - toSURL = s; - } - - public int lifeTime() { - - return lifetime; - } - - public void setLifeTime(int n) { - - lifetime = n; - } - - public String fileStorageType() { - - return fileStorageType; - } - - /** - * Method used to set the FileStorageType: if s is null nothing gets set; the - * 
internal default String is the one relative to Volatile FileStorageType. - */ - public void setFileStorageType(String s) { - - if (s != null) - fileStorageType = s; - } - - public String spaceToken() { - - return spaceToken; - } - - public void setSpaceToken(String s) { - - spaceToken = s; - } - - public String overwriteOption() { - - return overwriteOption; - } - - /** - * Method used to set the OverwriteMode: if s is null nothing gets set; the - * internal default String is the one relative to Never OverwriteMode. - */ - public void setOverwriteOption(String s) { - - if (s != null) - overwriteOption = s; - } - - public int status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(requestToken); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedSourceStFN); - sb.append(" "); - sb.append(sourceSurlUniqueID); - sb.append(" "); - sb.append(toSURL); - sb.append(" "); - sb.append(normalizedTargetStFN); - sb.append(" "); - sb.append(targetSurlUniqueID); - sb.append(" "); - sb.append(lifetime); - sb.append(" "); - sb.append(fileStorageType); - sb.append(" "); - sb.append(spaceToken); - sb.append(" "); - sb.append(overwriteOption); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyData.java b/src/main/java/it/grid/storm/catalogs/CopyData.java deleted file mode 100644 index 5d1437fcb..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyData.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a CopyChunkData, that is part of a multifile Copy srm - * request. It contains data about: the requestToken, the fromSURL, the toSURL, - * the target fileLifeTime, the target fileStorageType and any available target - * spaceToken, the target overwriteOption to be applied in case the file already - * exists, the fileSize of the existing file if any, return status of the file - * together with its error string. - * - * @author EGRID - ICTP Trieste - * @date September, 2005 - * @version 2.0 - */ -public class CopyData extends SurlMultyOperationRequestData { - - private static final Logger log = LoggerFactory.getLogger(CopyData.class); - - /** - * SURL to which the srmCopy will put the file - */ - protected TSURL destinationSURL; - - /** - * requested lifetime - BEWARE!!! It is the fileLifetime at destination in - * case of Volatile files! 
- */ - protected TLifeTimeInSeconds lifetime; - - /** - * TFileStorageType at destination - */ - protected TFileStorageType fileStorageType; - - /** - * SpaceToken to use for toSURL - */ - protected TSpaceToken spaceToken; - - /** - * specifies the behaviour in case of existing files for Put part of the copy - * (could be local or remote!) - */ - protected TOverwriteMode overwriteOption; - - public CopyData(TSURL fromSURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status) - throws InvalidCopyDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(fromSURL, status); - if (destinationSURL == null || lifetime == null || fileStorageType == null - || spaceToken == null || overwriteOption == null) { - throw new InvalidCopyDataAttributesException(fromSURL, destinationSURL, - lifetime, fileStorageType, spaceToken, overwriteOption, status); - } - this.destinationSURL = destinationSURL; - this.lifetime = lifetime; - this.fileStorageType = fileStorageType; - this.spaceToken = spaceToken; - this.overwriteOption = overwriteOption; - } - - /** - * Method that returns the toSURL of the srm request to which this chunk - * belongs. - */ - public TSURL getDestinationSURL() { - - return destinationSURL; - } - - /** - * Method that returns the requested pin life time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds getLifetime() { - - return lifetime; - } - - /** - * Method that returns the fileStorageType for this chunk of the srm request. - */ - public TFileStorageType getFileStorageType() { - - return fileStorageType; - } - - /** - * Method that returns the space token supplied for this chunk of the srm - * request. - */ - public TSpaceToken getSpaceToken() { - - return spaceToken; - } - - /** - * Method that returns the overwriteOption specified in the srm request. 
- */ - public TOverwriteMode getOverwriteOption() { - - return overwriteOption; - } - - /** - * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_DUPLICATION_ERROR(String explanation) { - - setStatus(TStatusCode.SRM_DUPLICATION_ERROR, explanation); - } - - /** - * Method that sets the status of this request to SRM_FATAL_INTERNAL_ERROR; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_FATAL_INTERNAL_ERROR(String explanation) { - - setStatus(TStatusCode.SRM_FATAL_INTERNAL_ERROR, explanation); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java b/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java deleted file mode 100644 index 6f08504f5..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import it.grid.storm.srm.types.TOverwriteMode; - -/** - * Package private auxiliary class used to convert between DPM and StoRM - * representation of Copy TOverwriteMode+RemoveSourceFiles global information - * for the whole request, and Flags in storm_req. - * - * @author: EGRID - ICTP Trieste - * @version: 1.0 - * @date: September 2005 - */ -class CopyGlobalFlagConverter { - - private Map DPMtoSTORM = new HashMap(); - private Map STORMtoDPM = new HashMap(); - - private static CopyGlobalFlagConverter c = new CopyGlobalFlagConverter(); - - /** - * Private constructor that fills in the conversion table; in particular, DPM - * uses int values to represent the pair of values: - * - * 0 NEVER + DO NOT RemoveSourceFiles 1 ALWAYS + DO NOT RemoveSourceFiles 2 - * WHENFILESAREDIFFERENT + DO NOT RemoveSourceFiles 4 NEVER + - * RemoveSourceFiles 5 ALWAYS + RemoveSourceFiles 6 WHENFILESAREDIFFERENT + - * RemoveSourceFiles - */ - private CopyGlobalFlagConverter() { - - DPMtoSTORM.put(new Integer(0), new Object[] { TOverwriteMode.NEVER, - new Boolean(false) }); - DPMtoSTORM.put(new Integer(1), new Object[] { TOverwriteMode.ALWAYS, - new Boolean(false) }); - DPMtoSTORM.put(new Integer(2), new Object[] { - TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(false) }); - DPMtoSTORM.put(new Integer(4), new Object[] { TOverwriteMode.NEVER, - new Boolean(true) }); - DPMtoSTORM.put(new Integer(5), new Object[] { TOverwriteMode.ALWAYS, - new Boolean(true) }); - DPMtoSTORM.put(new Integer(6), new Object[] { - TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(true) }); - Object aux; - for (Iterator i = DPMtoSTORM.keySet().iterator(); i.hasNext();) { - aux = i.next(); - STORMtoDPM.put(DPMtoSTORM.get(aux), aux); - } - } - - /** - * Method that returns the only instance of OverwriteModeConverter. 
- */ - public static CopyGlobalFlagConverter getInstance() { - - return c; - } - - /** - * Method that returns the int used by DPM to represent the given - * TOverwriteMode and removeSourceFiles boolean. -1 is returned if no match is - * found. - */ - public int toDPM(TOverwriteMode om, boolean removeSourceFiles) { - - Integer aux = (Integer) STORMtoDPM.get(new Object[] { om, - new Boolean(removeSourceFiles) }); - if (aux == null) - return -1; - return aux.intValue(); - } - - /** - * Method that returns an Object[] containing the TOverwriteMode and the - * boolean used by StoRM to represent the supplied int representation of DPM. - * An empty Object[] is returned if no StoRM type is found. - */ - public Object[] toSTORM(int n) { - - Object[] aux = (Object[]) DPMtoSTORM.get(new Integer(n)); - if (aux == null) - return new Object[] {}; - return aux; - } - - public String toString() { - - return "OverWriteModeConverter.\nDPMtoSTORM map:" + DPMtoSTORM - + "\nSTORMtoDPM map:" + STORMtoDPM; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java deleted file mode 100644 index c9c1185aa..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a CopyChunkData, that is part of a multifile Copy srm - * request. It contains data about: the requestToken, the fromSURL, the toSURL, - * the target fileLifeTime, the target fileStorageType and any available target - * spaceToken, the target overwriteOption to be applied in case the file already - * exists, the fileSize of the existing file if any, return status of the file - * together with its error string. - * - * @author EGRID - ICTP Trieste - * @date September, 2005 - * @version 2.0 - */ -public class CopyPersistentChunkData extends CopyData implements - PersistentChunkData { - - private static final Logger log = LoggerFactory - .getLogger(CopyPersistentChunkData.class); - - /** - * long representing the primary key for the persistence layer! 
- */ - private long primaryKey = -1; - - /** - * This is the requestToken of the multifile srm request to which this chunk - * belongs - */ - private TRequestToken requestToken; - - public CopyPersistentChunkData(TRequestToken requestToken, TSURL fromSURL, - TSURL destinationSURL, TLifeTimeInSeconds lifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TOverwriteMode overwriteOption, TReturnStatus status) - throws InvalidCopyPersistentChunkDataAttributesException, - InvalidCopyDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(fromSURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status); - if (requestToken == null) { - log.debug("CopyPersistentChunkData: requestToken is null!"); - throw new InvalidCopyPersistentChunkDataAttributesException(requestToken, - fromSURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status); - } - this.requestToken = requestToken; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - public long getPrimaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! - */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the requestToken of the srm request to which this chunk - * belongs. - */ - public TRequestToken getRequestToken() { - - return requestToken; - } - - @Override - public long getIdentifier() { - - return getPrimaryKey(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java b/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java deleted file mode 100644 index b83a7daa7..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import it.grid.storm.srm.types.TOverwriteMode; - -/** - * Package private auxiliary class used to convert between DPM and StoRM - * representation of Copy TOverwriteMode+TDirOption request specific - * information, and Flags in storm_copy_filereq. - * - * @author: EGRID - ICTP Trieste - * @version: 1.0 - * @date: September 2005 - */ -class CopySpecificFlagConverter { - - private Map DPMtoSTORM = new HashMap(); - private Map STORMtoDPM = new HashMap(); - - private static CopySpecificFlagConverter c = new CopySpecificFlagConverter(); - - /** - * Private constructor that fills in the conversion table; in particular, DPM - * uses int values to represent the pair of values: - * - * 0 NEVER + source NOT directory 1 ALWAYS + source NOT directory 2 - * WHENFILESAREDIFFERENT + source NOT directory 4 NEVER + source is directory - * 5 ALWAYS + source is directory 6 WHENFILESAREDIFFERENT + source is - * directory - */ - private CopySpecificFlagConverter() { - - DPMtoSTORM.put(new Integer(0), new Object[] { TOverwriteMode.NEVER, - new Boolean(false) }); - DPMtoSTORM.put(new Integer(1), new Object[] { TOverwriteMode.ALWAYS, - new Boolean(false) }); - DPMtoSTORM.put(new Integer(2), new Object[] { - TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(false) }); - DPMtoSTORM.put(new Integer(4), new Object[] { 
TOverwriteMode.NEVER, - new Boolean(true) }); - DPMtoSTORM.put(new Integer(5), new Object[] { TOverwriteMode.ALWAYS, - new Boolean(true) }); - DPMtoSTORM.put(new Integer(6), new Object[] { - TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(true) }); - Object aux; - for (Iterator i = DPMtoSTORM.keySet().iterator(); i.hasNext();) { - aux = i.next(); - STORMtoDPM.put(DPMtoSTORM.get(aux), aux); - } - } - - /** - * Method that returns the only instance of CopySpecificFlagConverter. - */ - public static CopySpecificFlagConverter getInstance() { - - return c; - } - - /** - * Method that returns the int used by DPM to represent the given - * TOverwriteMode and isSourceADirectory boolean. -1 is returned if no match - * is found. - */ - public int toDPM(TOverwriteMode om, boolean isSourceADirectory) { - - Integer aux = (Integer) STORMtoDPM.get(new Object[] { om, - new Boolean(isSourceADirectory) }); - if (aux == null) - return -1; - return aux.intValue(); - } - - /** - * Method that returns an Object[] containing the TOverwriteMode and the - * Boolean used by StoRM to represent the supplied int representation of DPM. - * An empty Object[] is returned if no StoRM type is found. - */ - public Object[] toSTORM(int n) { - - Object[] aux = (Object[]) DPMtoSTORM.get(new Integer(n)); - if (aux == null) - return new Object[] {}; - return aux; - } - - public String toString() { - - return "OverWriteModeConverter.\nDPMtoSTORM map:" + DPMtoSTORM - + "\nSTORMtoDPM map:" + STORMtoDPM; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/DirOptionConverter.java b/src/main/java/it/grid/storm/catalogs/DirOptionConverter.java deleted file mode 100644 index 55391d4d7..000000000 --- a/src/main/java/it/grid/storm/catalogs/DirOptionConverter.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -/** - * Package private class that translates between DPM flag for TDirOption and - * StoRM TDirOption proper. - * - * In particular DPM uses the int 1 to denote a recursive call, yet it fails to - * distinguish between a chosen recursion level; in other words there is no way - * that DPM specifies the number of levels to recurse: so either you recurse - * till the end or nothing. - * - * @author EGRID - ICTP Trieste - * @version 1.0 - * @date August, 2005 - */ -class DirOptionConverter { - - static private DirOptionConverter converter = null; - - private DirOptionConverter() { - - } - - static public DirOptionConverter getInstance() { - - if (converter == null) - converter = new DirOptionConverter(); - return converter; - } - - /** - * Method that translates the int used by DPM as flag for TDirOption, into a - * boolean for isDirOption. - * - * 1 causes true to be returned; any other value returns 0. - */ - public boolean toSTORM(int n) { - - return (n == 1); - } - - /** - * Method used to translate the boolean isDirOption into an int used by DPM to - * express the same thing. - * - * true gets translated into 1; false into 0. 
- */ - public int toDPM(boolean isDirOption) { - - if (isDirOption) - return 1; - return 0; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java b/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java deleted file mode 100644 index d84d199cf..000000000 --- a/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.config.Configuration; - -/** - * Class that handles DB representation of a pinLifetime as expressed by a - * TLifetimeInSeconds objects; in particular it takes care of protocol - * specification: - * - * 0/null/negative are translated as default StoRM configurable values. StoRMs - * Empty TLifeTimeInSeconds is translated as 0. 
- * - * @author EGRID ICTP - * @version 1.0 - * @date March 2007 - */ -public class FileLifetimeConverter { - - private static FileLifetimeConverter stc = new FileLifetimeConverter(); // only - // instance - - private FileLifetimeConverter() { - - } - - /** - * Method that returns the only instance of SizeInBytesIntConverter - */ - public static FileLifetimeConverter getInstance() { - - return stc; - } - - /** - * Method that translates the Empty TLifeTimeInSeconds into the empty - * representation of DB which is 0. Any other value is left as is. - */ - public int toDB(long l) { - - if (l == TLifeTimeInSeconds.makeEmpty().value()) - return 0; - return new Long(l).intValue(); - } - - /** - * Method that returns the long corresponding to the int value in the DB, - * except if it is 0, NULL or negative; a configurable default value is - * returned instead, corresponding to the getFileLifetimeDefault() - * Configuration class method. - */ - public long toStoRM(int s) { - - if (s <= 0) - return Configuration.getInstance().getFileLifetimeDefault(); - return new Integer(s).longValue(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java b/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java deleted file mode 100644 index 2e27fdb1a..000000000 --- a/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.config.Configuration; - -/** - * Package private auxiliary class used to convert between DB raw data and StoRM - * object model representation of TFileStorageType. - * - * @author: EGRID ICTP - * @version: 2.0 - * @date: June 2005 - */ -class FileStorageTypeConverter { - - private Map DBtoSTORM = new HashMap(); - private Map STORMtoDB = new HashMap(); - - private static FileStorageTypeConverter c = new FileStorageTypeConverter(); - - /** - * Private constructor that fills in the conversion tables; - * - * V - VOLATILE P - PERMANENT D - DURABLE - */ - private FileStorageTypeConverter() { - - DBtoSTORM.put("V", TFileStorageType.VOLATILE); - DBtoSTORM.put("P", TFileStorageType.PERMANENT); - DBtoSTORM.put("D", TFileStorageType.DURABLE); - String aux; - for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) { - aux = i.next(); - STORMtoDB.put(DBtoSTORM.get(aux), aux); - } - } - - /** - * Method that returns the only instance of FileStorageTypeConverter. - */ - public static FileStorageTypeConverter getInstance() { - - return c; - } - - /** - * Method that returns the String used in the DB to represent the given - * TFileStorageType. The empty String "" is returned if no match is found. - */ - public String toDB(TFileStorageType fst) { - - String aux = (String) STORMtoDB.get(fst); - if (aux == null) - return ""; - return aux; - } - - /** - * Method that returns the TFileStorageType used by StoRM to represent the - * supplied String representation in the DB. A configured default - * TFileStorageType is returned in case no corresponding StoRM type is found. - * TFileStorageType.EMPTY is returned if there are configuration errors. 
- */ - public TFileStorageType toSTORM(String s) { - - TFileStorageType aux = DBtoSTORM.get(s); - if (aux == null) - // This case is that the String s is different from V,P or D. - aux = DBtoSTORM.get(Configuration.getInstance() - .getDefaultFileStorageType()); - if (aux == null) - // This case should never happen, but in case we prefer ponder PERMANENT. - return TFileStorageType.EMPTY; - else - return aux; - } - - public String toString() { - - return "FileStorageTypeConverter.\nDBtoSTORM map:" + DBtoSTORM - + "\nSTORMtoDB map:" + STORMtoDB; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/FileTransferData.java b/src/main/java/it/grid/storm/catalogs/FileTransferData.java deleted file mode 100644 index 505b7cba0..000000000 --- a/src/main/java/it/grid/storm/catalogs/FileTransferData.java +++ /dev/null @@ -1,25 +0,0 @@ -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TTURL; - -public interface FileTransferData extends SynchMultyOperationRequestData { - - /** - * Method that returns a TURLPrefix containing the transfer protocols desired - * for this chunk of the srm request. - */ - public TURLPrefix getTransferProtocols(); - - /** - * Method that returns the TURL for this chunk of the srm request. - */ - public TTURL getTransferURL(); - - /** - * Method used to set the transferURL associated to the SURL of this chunk. If - * TTURL is null, then nothing gets set! - */ - public void setTransferURL(final TTURL turl); - -} diff --git a/src/main/java/it/grid/storm/catalogs/IdentityPtGData.java b/src/main/java/it/grid/storm/catalogs/IdentityPtGData.java deleted file mode 100644 index 7a06f1db9..000000000 --- a/src/main/java/it/grid/storm/catalogs/IdentityPtGData.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TTURL; -import it.grid.storm.synchcall.data.IdentityInputData; - -public class IdentityPtGData extends AnonymousPtGData implements - IdentityInputData { - - private final GridUserInterface auth; - - /** - * @param requestToken - * @param fromSURL - * @param lifeTime - * @param dirOption - * @param desiredProtocols - * @param fileSize - * @param status - * @param transferURL - * @throws InvalidPtGDataAttributesException - */ - public IdentityPtGData(GridUserInterface auth, TSURL SURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix desiredProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL) throws InvalidPtGDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException, IllegalArgumentException { - - super(SURL, lifeTime, dirOption, desiredProtocols, fileSize, status, - transferURL); - if (auth == null) { - throw new IllegalArgumentException( - "Unable to create the object, invalid arguments: auth=" + auth); - } - this.auth = 
auth; - } - - @Override - public GridUserInterface getUser() { - - return auth; - } - - @Override - public String getPrincipal() { - - return this.auth.getDn(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/IdentityPtPData.java b/src/main/java/it/grid/storm/catalogs/IdentityPtPData.java deleted file mode 100644 index af35bc9bb..000000000 --- a/src/main/java/it/grid/storm/catalogs/IdentityPtPData.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * - */ -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TTURL; -import it.grid.storm.synchcall.data.IdentityInputData; - -/** - * @author Michele Dibenedetto - * - */ -public class IdentityPtPData extends AnonymousPtPData implements - IdentityInputData { - - private final GridUserInterface auth; - - /** - * @param requestToken - * @param fromSURL - * @param lifeTime - * @param dirOption - * @param desiredProtocols - * @param fileSize - * @param status - * @param transferURL - * @throws InvalidPtGDataAttributesException - */ - public IdentityPtPData(GridUserInterface auth, TSURL SURL, - TLifeTimeInSeconds pinLifetime, TLifeTimeInSeconds fileLifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TSizeInBytes expectedFileSize, TURLPrefix transferProtocols, - TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL) - throws InvalidPtPDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException, IllegalArgumentException { - - super(SURL, pinLifetime, fileLifetime, fileStorageType, spaceToken, - 
expectedFileSize, transferProtocols, overwriteOption, status, transferURL); - if (auth == null) { - throw new IllegalArgumentException( - "Unable to create the object, invalid arguments: auth=" + auth); - } - this.auth = auth; - } - - @Override - public GridUserInterface getUser() { - - return auth; - } - - @Override - public String getPrincipal() { - - return this.auth.getDn(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java deleted file mode 100644 index 5e782876e..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of BoLChunkData are invalid, that is if any of the following is - * _null_: requestToken, fromSURL, lifeTime, numOfLevels, transferProtocols, - * fileSize, status, transferURL. - * - * @author CNAF - * @date Aug 2009 - * @version 1.0 - */ -public class InvalidBoLChunkDataAttributesException extends Exception { - - private static final long serialVersionUID = 5657310881067434280L; - - // booleans that indicate whether the corresponding variable is null - private boolean nullRequestToken; - private boolean nullFromSURL; - private boolean nullLifeTime; - private boolean nullDirOption; - private boolean nullTransferProtocols; - private boolean nullFileSize; - private boolean nullStatus; - private boolean nullTransferURL; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidBoLChunkDataAttributesException(TRequestToken requestToken, - TSURL fromSURL, TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL) { - - nullRequestToken = requestToken == null; - nullFromSURL = fromSURL == null; - nullLifeTime = lifeTime == null; - nullDirOption = dirOption == null; - nullTransferProtocols = transferProtocols == null; - nullFileSize = fileSize == null; - nullStatus = status == null; - nullTransferURL = transferURL == null; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid BoLChunkData attributes: null-requestToken="); - sb.append(nullRequestToken); - sb.append("; nul-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-lifeTime="); - sb.append(nullLifeTime); - sb.append("; null-dirOption="); - sb.append(nullDirOption); - sb.append("; null-transferProtocols="); - sb.append(nullTransferProtocols); - sb.append("; null-fileSize="); - sb.append(nullFileSize); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("; null-transferURL="); - sb.append(nullTransferURL); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidBoLDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidBoLDataAttributesException.java deleted file mode 100644 index 41a9a9afc..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidBoLDataAttributesException.java +++ /dev/null @@ -1,94 +0,0 @@ -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TTURL; - -/** - * @author Michele Dibenedetto - * - */ -public class 
InvalidBoLDataAttributesException extends - InvalidFileTransferDataAttributesException { - - private static final long serialVersionUID = 8113403994527678088L; - // booleans that indicate whether the corresponding variable is null - protected boolean nullLifeTime; - protected boolean nullDirOption; - protected boolean nullFileSize; - - public InvalidBoLDataAttributesException(TSURL fromSURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL) { - - super(fromSURL, transferProtocols, status, transferURL); - init(lifeTime, dirOption, fileSize); - } - - public InvalidBoLDataAttributesException(TSURL fromSURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL, String message) { - - super(fromSURL, transferProtocols, status, transferURL, message); - init(lifeTime, dirOption, fileSize); - } - - public InvalidBoLDataAttributesException(TSURL fromSURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL, Throwable cause) { - - super(fromSURL, transferProtocols, status, transferURL, cause); - init(lifeTime, dirOption, fileSize); - } - - public InvalidBoLDataAttributesException(TSURL fromSURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL, String message, Throwable cause) { - - super(fromSURL, transferProtocols, status, transferURL, message, cause); - init(lifeTime, dirOption, fileSize); - } - - private void init(TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TSizeInBytes fileSize) { - - nullLifeTime = lifeTime == null; - nullDirOption = dirOption == null; - nullFileSize = fileSize == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override 
- public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("InvalidBoLDataAttributesException [nullLifeTime="); - builder.append(nullLifeTime); - builder.append(", nullDirOption="); - builder.append(nullDirOption); - builder.append(", nullFileSize="); - builder.append(nullFileSize); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append(", nullTransferURL="); - builder.append(nullTransferURL); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidBoLPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidBoLPersistentChunkDataAttributesException.java deleted file mode 100644 index a47e5433c..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidBoLPersistentChunkDataAttributesException.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of PtPChunkData are invalid, that is if any of the following is - * _null_: requestToken, toSURL, lifetime, fileStorageType, spaceToken, - * knownSizeOfThisFile, TURLPrefix transferProtocols, overwriteOption, fileSize, - * status, transferURL. - * - * @author EGRID - ICTP Trieste - * @date June, 2005 - * @version 2.0 - */ -public class InvalidBoLPersistentChunkDataAttributesException extends - InvalidBoLDataAttributesException { - - private static final long serialVersionUID = -5117535717125685975L; - /** - * booleans that indicate whether the corresponding variable is null - */ - boolean nullRequestToken; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidBoLPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL fromSURL, TLifeTimeInSeconds lifeTime, - TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize, - TReturnStatus status, TTURL transferURL) { - - super(fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, - transferURL); - nullRequestToken = requestToken == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder - .append("InvalidBoLPersistentChunkDataAttributesException [nullRequestToken="); - builder.append(nullRequestToken); - builder.append(", nullLifeTime="); - builder.append(nullLifeTime); - builder.append(", nullDirOption="); - builder.append(nullDirOption); - builder.append(", nullFileSize="); - builder.append(nullFileSize); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append(", nullTransferURL="); - builder.append(nullTransferURL); - builder.append("]"); - return builder.toString(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java deleted file mode 100644 index 01363fd8e..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of CopyChunkData are invalid, that is if any of the following is - * _null_: requestToken, fromsURL, toSURL, lifetime, fileStorageType, - * spaceToken, overwriteOption, status. - * - * @author EGRID - ICTP Trieste - * @date September, 2005 - * @version 2.0 - */ -public class InvalidCopyChunkDataAttributesException extends Exception { - - private static final long serialVersionUID = 6786154038995023512L; - - // booleans that indicate whether the corresponding variable is null - private boolean nullRequestToken; - private boolean nullFromSURL; - private boolean nullToSURL; - private boolean nullLifetime; - private boolean nullFileStorageType; - private boolean nullSpaceToken; - private boolean nullOverwriteOption; - private boolean nullStatus; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidCopyChunkDataAttributesException(TRequestToken requestToken, - TSURL fromSURL, TSURL toSURL, TLifeTimeInSeconds lifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TOverwriteMode overwriteOption, TReturnStatus status) { - - nullRequestToken = requestToken == null; - nullFromSURL = fromSURL == null; - nullToSURL = toSURL == null; - nullLifetime = lifetime == null; - nullFileStorageType = fileStorageType == null; - nullSpaceToken = spaceToken == null; - nullOverwriteOption = overwriteOption == null; - nullStatus = status == null; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid CopyChunkData attributes: null-requestToken="); - sb.append(nullRequestToken); - sb.append("; null-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-toSURL="); - sb.append(nullToSURL); - sb.append("; null-lifetime="); - sb.append(nullLifetime); - sb.append("; null-filestorageType="); - sb.append(nullFileStorageType); - sb.append("; null-spaceToken="); - sb.append(nullSpaceToken); - sb.append("; null-overwriteOption="); - sb.append(nullOverwriteOption); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java deleted file mode 100644 index 8af415056..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; - -/** - * @author Michele Dibenedetto - * - */ -public class InvalidCopyDataAttributesException extends - InvalidSurlRequestDataAttributesException { - - private static final long serialVersionUID = -1217486426437414490L; - protected boolean nullDestinationSURL; - protected boolean nullLifetime; - protected boolean nullFileStorageType; - protected boolean nullSpaceToken; - protected boolean nullOverwriteOption; - - public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status) { - - super(SURL, status); - init(destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption); - } - - public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, String message) { - - super(SURL, status, message); - init(destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption); - } - - public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType 
fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, Throwable cause) { - - super(SURL, status, cause); - init(destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption); - } - - public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, String message, Throwable cause) { - - super(SURL, status, message, cause); - init(destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption); - } - - private void init(TSURL destinationSURL, TLifeTimeInSeconds lifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TOverwriteMode overwriteOption) { - - nullDestinationSURL = destinationSURL == null; - nullLifetime = lifetime == null; - nullFileStorageType = fileStorageType == null; - nullSpaceToken = spaceToken == null; - nullOverwriteOption = overwriteOption == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("InvalidCopyDataAttributesException [nullDestinationSURL="); - builder.append(nullDestinationSURL); - builder.append(", nullLifetime="); - builder.append(nullLifetime); - builder.append(", nullFileStorageType="); - builder.append(nullFileStorageType); - builder.append(", nullSpaceToken="); - builder.append(nullSpaceToken); - builder.append(", nullOverwriteOption="); - builder.append(nullOverwriteOption); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java 
deleted file mode 100644 index 77cdb8dcd..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; - -/** - * @author Michele Dibenedetto - * - */ -public class InvalidCopyPersistentChunkDataAttributesException extends - InvalidCopyDataAttributesException { - - /** - * - */ - private static final long serialVersionUID = 1266996505954208061L; - private boolean nullRequestToken; - - public InvalidCopyPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status) { - - super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status); - init(requestToken); - } - - public InvalidCopyPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL SURL, TSURL destinationSURL, - 
TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, String message) { - - super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status, message); - init(requestToken); - } - - public InvalidCopyPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, Throwable cause) { - - super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status, cause); - init(requestToken); - } - - public InvalidCopyPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, String message, Throwable cause) { - - super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status, message, cause); - init(requestToken); - } - - private void init(TRequestToken requestToken) { - - nullRequestToken = requestToken == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder - .append("InvalidCopyPersistentChunkDataAttributesException [nullRequestToken="); - builder.append(nullRequestToken); - builder.append(", nullDestinationSURL="); - builder.append(nullDestinationSURL); - builder.append(", nullLifetime="); - builder.append(nullLifetime); - builder.append(", nullFileStorageType="); - builder.append(nullFileStorageType); - builder.append(", nullSpaceToken="); - builder.append(nullSpaceToken); - builder.append(", nullOverwriteOption="); - builder.append(nullOverwriteOption); - builder.append(", nullSURL="); - 
builder.append(nullSURL); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidFileTransferDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidFileTransferDataAttributesException.java deleted file mode 100644 index fc28c0743..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidFileTransferDataAttributesException.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TTURL; - -/** - * @author Michele Dibenedetto - */ -public class InvalidFileTransferDataAttributesException extends - InvalidSurlRequestDataAttributesException { - - private static final long serialVersionUID = 4416318501544415810L; - protected boolean nullTransferProtocols; - protected boolean nullTransferURL; - - public InvalidFileTransferDataAttributesException(TSURL SURL, - TURLPrefix transferProtocols, TReturnStatus status, TTURL transferURL) { - - super(SURL, status); - init(transferProtocols, transferURL); - } - - public InvalidFileTransferDataAttributesException(TSURL SURL, - TURLPrefix transferProtocols, TReturnStatus status, TTURL transferURL, - String message) { - - super(SURL, status, message); - init(transferProtocols, transferURL); - } - - public InvalidFileTransferDataAttributesException(TSURL SURL, - TURLPrefix transferProtocols, TReturnStatus status, TTURL transferURL, - Throwable cause) { - - super(SURL, status, cause); - init(transferProtocols, transferURL); - } - - public InvalidFileTransferDataAttributesException(TSURL SURL, - TURLPrefix transferProtocols, TReturnStatus status, TTURL transferURL, - String message, Throwable cause) { - - super(SURL, status, message, cause); - init(transferProtocols, transferURL); - } - - private void init(TURLPrefix transferProtocols, TTURL transferURL) { - - nullTransferProtocols = transferProtocols == null; - nullTransferURL = transferURL == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder - .append("InvalidFileTransferDataAttributesException [nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullTransferURL="); - 
builder.append(nullTransferURL); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtGDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidPtGDataAttributesException.java deleted file mode 100644 index 42ed5c4eb..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidPtGDataAttributesException.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of PtGChunkData are invalid, that is if any of the following is - * _null_: requestToken, fromSURL, lifeTime, numOfLevels, transferProtocols, - * fileSize, status, transferURL. 
- * - * @author EGRID - ICTP Trieste - * @date March 23rd, 2005 - * @version 3.0 - */ -public class InvalidPtGDataAttributesException extends - InvalidFileTransferDataAttributesException { - - private static final long serialVersionUID = -3484929474636108262L; - // booleans that indicate whether the corresponding variable is null - protected boolean nullLifeTime; - protected boolean nullDirOption; - protected boolean nullFileSize; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. - */ - public InvalidPtGDataAttributesException(TSURL fromSURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL) { - - super(fromSURL, transferProtocols, status, transferURL); - nullLifeTime = lifeTime == null; - nullDirOption = dirOption == null; - nullFileSize = fileSize == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("InvalidPtGChunkDataAttributesException [nullLifeTime="); - builder.append(nullLifeTime); - builder.append(", nullDirOption="); - builder.append(nullDirOption); - builder.append(", nullFileSize="); - builder.append(nullFileSize); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append(", nullTransferURL="); - builder.append(nullTransferURL); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtGPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidPtGPersistentChunkDataAttributesException.java deleted file mode 100644 index 9ab9dcadb..000000000 --- 
a/src/main/java/it/grid/storm/catalogs/InvalidPtGPersistentChunkDataAttributesException.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of PtPChunkData are invalid, that is if any of the following is - * _null_: requestToken, toSURL, lifetime, fileStorageType, spaceToken, - * knownSizeOfThisFile, TURLPrefix transferProtocols, overwriteOption, fileSize, - * status, transferURL. - * - * @author EGRID - ICTP Trieste - * @date June, 2005 - * @version 2.0 - */ -public class InvalidPtGPersistentChunkDataAttributesException extends - InvalidPtGDataAttributesException { - - private static final long serialVersionUID = -5117535717125685975L; - /** - * booleans that indicate whether the corresponding variable is null - */ - boolean nullRequestToken; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidPtGPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL fromSURL, TLifeTimeInSeconds lifeTime, - TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize, - TReturnStatus status, TTURL transferURL) { - - super(fromSURL, lifeTime, dirOption, transferProtocols, fileSize, status, - transferURL); - nullRequestToken = requestToken == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder - .append("InvalidPtGPersistentChunkDataAttributesException [nullRequestToken="); - builder.append(nullRequestToken); - builder.append(", nullLifeTime="); - builder.append(nullLifeTime); - builder.append(", nullDirOption="); - builder.append(nullDirOption); - builder.append(", nullFileSize="); - builder.append(nullFileSize); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append(", nullTransferURL="); - builder.append(nullTransferURL); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtPDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidPtPDataAttributesException.java deleted file mode 100644 index 55d445e35..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidPtPDataAttributesException.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TTURL; - -/** - * @author Michele Dibenedetto - */ -public class InvalidPtPDataAttributesException extends - InvalidFileTransferDataAttributesException { - - /** - * - */ - private static final long serialVersionUID = 1051060981188652979L; - protected boolean nullSpaceToken; - protected boolean nullPinLifetime; - protected boolean nullFileLifetime; - protected boolean nullFileStorageType; - protected boolean nullKnownSizeOfThisFile; - protected boolean nullOverwriteOption; - - public InvalidPtPDataAttributesException(TSURL toSURL, - TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, - TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL) { - - super(toSURL, transferProtocols, status, transferURL); - init(spaceToken, fileLifetime, pinLifetime, fileStorageType, - knownSizeOfThisFile, overwriteOption); - } - - public InvalidPtPDataAttributesException(TSURL toSURL, - TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime, - TFileStorageType fileStorageType, 
TSpaceToken spaceToken, - TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, - TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL, - String message) { - - super(toSURL, transferProtocols, status, transferURL, message); - init(spaceToken, fileLifetime, pinLifetime, fileStorageType, - knownSizeOfThisFile, overwriteOption); - } - - public InvalidPtPDataAttributesException(TSURL toSURL, - TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, - TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL, - Throwable cause) { - - super(toSURL, transferProtocols, status, transferURL, cause); - init(spaceToken, fileLifetime, pinLifetime, fileStorageType, - knownSizeOfThisFile, overwriteOption); - } - - public InvalidPtPDataAttributesException(TSURL toSURL, - TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, - TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL, - String message, Throwable cause) { - - super(toSURL, transferProtocols, status, transferURL, message, cause); - init(spaceToken, fileLifetime, pinLifetime, fileStorageType, - knownSizeOfThisFile, overwriteOption); - } - - private void init(TSpaceToken spaceToken, TLifeTimeInSeconds fileLifetime, - TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType, - TSizeInBytes knownSizeOfThisFile, TOverwriteMode overwriteOption) { - - nullSpaceToken = spaceToken == null; - nullPinLifetime = pinLifetime == null; - nullFileLifetime = fileLifetime == null; - nullFileStorageType = fileStorageType == null; - nullKnownSizeOfThisFile = knownSizeOfThisFile == null; - nullOverwriteOption = overwriteOption == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - 
*/ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("InvalidPtPDataAttributesException [nullSpaceToken="); - builder.append(nullSpaceToken); - builder.append(", nullPinLifetime="); - builder.append(nullPinLifetime); - builder.append(", nullFileLifetime="); - builder.append(nullFileLifetime); - builder.append(", nullFileStorageType="); - builder.append(nullFileStorageType); - builder.append(", nullKnownSizeOfThisFile="); - builder.append(nullKnownSizeOfThisFile); - builder.append(", nullOverwriteOption="); - builder.append(nullOverwriteOption); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append(", nullTransferURL="); - builder.append(nullTransferURL); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtPPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidPtPPersistentChunkDataAttributesException.java deleted file mode 100644 index cb191a997..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidPtPPersistentChunkDataAttributesException.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of PtPChunkData are invalid, that is if any of the following is - * _null_: requestToken, toSURL, lifetime, fileStorageType, spaceToken, - * knownSizeOfThisFile, TURLPrefix transferProtocols, overwriteOption, fileSize, - * status, transferURL. - * - * @author EGRID - ICTP Trieste - * @date June, 2005 - * @version 2.0 - */ -public class InvalidPtPPersistentChunkDataAttributesException extends - InvalidPtPDataAttributesException { - - private static final long serialVersionUID = -5117535717125685975L; - /** - * booleans that indicate whether the corresponding variable is null - */ - boolean nullRequestToken; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidPtPPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL toSURL, TLifeTimeInSeconds fileLifetime, - TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TSizeInBytes knownSizeOfThisFile, - TURLPrefix transferProtocols, TOverwriteMode overwriteOption, - TReturnStatus status, TTURL transferURL) { - - super(toSURL, fileLifetime, pinLifetime, fileStorageType, spaceToken, - knownSizeOfThisFile, transferProtocols, overwriteOption, status, - transferURL); - nullRequestToken = requestToken == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder - .append("InvalidPtPPersistentChunkDataAttributesException [nullRequestToken="); - builder.append(nullRequestToken); - builder.append(", nullSpaceToken="); - builder.append(nullSpaceToken); - builder.append(", nullPinLifetime="); - builder.append(nullPinLifetime); - builder.append(", nullFileLifetime="); - builder.append(nullFileLifetime); - builder.append(", nullFileStorageType="); - builder.append(nullFileStorageType); - builder.append(", nullKnownSizeOfThisFile="); - builder.append(nullKnownSizeOfThisFile); - builder.append(", nullOverwriteOption="); - builder.append(nullOverwriteOption); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append(", nullTransferURL="); - builder.append(nullTransferURL); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java deleted file mode 100644 index bd1e35c03..000000000 --- 
a/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TReturnStatus; - -/** - * This class represents an exception thrown when the attributes supplied to the - * constructor of ReducedCopyChunkData are invalid, that is if any of the - * following is _null_: fromsURL, toSURL, status. - * - * @author Michele Dibenedetto - */ -@SuppressWarnings("serial") -public class InvalidReducedCopyChunkDataAttributesException extends Exception { - - // booleans that indicate whether the corresponding variable is null - private boolean nullFromSURL; - private boolean nullToSURL; - private boolean nullStatus; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidReducedCopyChunkDataAttributesException(TSURL fromSURL, - TSURL toSURL, TReturnStatus status) { - - nullFromSURL = fromSURL == null; - nullToSURL = toSURL == null; - nullStatus = status == null; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid CopyChunkData attributes: null-requestToken="); - sb.append("; null-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-toSURL="); - sb.append(nullToSURL); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtPChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidReducedPtPChunkDataAttributesException.java deleted file mode 100644 index 9b2847b73..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtPChunkDataAttributesException.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of ReducedPtPChunkData are invalid, that is if any is _null_. - * - * @author EGRID - ICTP Trieste - * @date January, 2007 - * @version 1.0 - */ -public class InvalidReducedPtPChunkDataAttributesException extends Exception { - - private static final long serialVersionUID = 4945626188325362854L; - - // booleans that indicate whether the corresponding variable is null - private boolean nullToSURL; - private boolean nullStatus; - private boolean nullFileStorageType; - private boolean nullFileLifetime; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. - */ - public InvalidReducedPtPChunkDataAttributesException(TSURL toSURL, - TReturnStatus status, TFileStorageType fileStorageType, - TLifeTimeInSeconds fileLifetime) { - - nullFileStorageType = fileStorageType == null; - nullToSURL = toSURL == null; - nullStatus = status == null; - nullFileLifetime = fileLifetime == null; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid PtPChunkData attributes: null-toSURL="); - sb.append(nullToSURL); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("; null-fileStorageType="); - sb.append(nullFileStorageType); - sb.append("; null-fileLifetime="); - sb.append(nullFileLifetime); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidRequestSummaryDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidRequestSummaryDataAttributesException.java deleted file mode 100644 index 6021de690..000000000 --- 
a/src/main/java/it/grid/storm/catalogs/InvalidRequestSummaryDataAttributesException.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestType; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.griduser.GridUserInterface; - -/** - * This class represents an Exception thrown when a RequestSummaryData object is - * created with any invalid attributes: null TRequestType, null TRequestToken, - * null VomsGridUser. - * - * @author EGRID - ICTP Trieste - * @date March 18th, 2005 - * @version 3.0 - */ -public class InvalidRequestSummaryDataAttributesException extends Exception { - - private static final long serialVersionUID = -7729349713696058669L; - - // booleans true if the corresponding variablesare null or negative - private boolean nullRequestType = true; - private boolean nullRequestToken = true; - private boolean nullVomsGridUser = true; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidRequestSummaryDataAttributesException(TRequestType requestType, - TRequestToken requestToken, GridUserInterface gu) { - - nullRequestType = (requestType == null); - nullRequestToken = (requestToken == null); - nullVomsGridUser = (gu == null); - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid RequestSummaryData attributes exception: "); - sb.append("nullRequestType="); - sb.append(nullRequestType); - sb.append("; nullRequestToken="); - sb.append(nullRequestToken); - sb.append("; nullVomsGridUser="); - sb.append(nullVomsGridUser); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java b/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java deleted file mode 100644 index 5a252b65f..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -/** - * Class that represents an Exception thrown by the ReservedSpaceCatalog when it - * is asked to retrieve info from the persistence but the raw data is invalid - * and does not allow a well-formed domain obejcts to be created. 
- * - * @author: EGRID ICTP - * @version: 1.0 - * @date: June 2005 - */ -public class InvalidRetrievedDataException extends Exception { - - private static final long serialVersionUID = -3645913441787012438L; - - private String requestToken; - private String requestType; - private int totalFilesInThisRequest; - private int numOfQueuedRequests; - private int numOfProgressing; - private int numFinished; - private boolean isSuspended; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. - */ - public InvalidRetrievedDataException(String requestToken, String requestType, - int totalFilesInThisRequest, int numOfQueuedRequests, - int numOfProgressingRequests, int numFinished, boolean isSuspended) { - - this.requestToken = requestToken; - this.requestType = requestType; - this.totalFilesInThisRequest = totalFilesInThisRequest; - this.numOfQueuedRequests = numOfQueuedRequests; - this.numOfProgressing = numOfProgressingRequests; - this.numFinished = numFinished; - this.isSuspended = isSuspended; - } - - public String toString() { - - return "InvalidRetrievedDataException: token=" + requestToken + " type=" - + requestType + " total-files=" + totalFilesInThisRequest + " queued=" - + numOfQueuedRequests + " progressing=" + numOfProgressing + " finished=" - + numFinished + " isSusp=" + isSuspended; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidSurlRequestDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidSurlRequestDataAttributesException.java deleted file mode 100644 index 363d6d895..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidSurlRequestDataAttributesException.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; - -/** - * @author Michele Dibenedetto - * - */ -public class InvalidSurlRequestDataAttributesException extends Exception { - - private static final long serialVersionUID = -8636768167720753989L; - protected boolean nullSURL; - protected boolean nullStatus; - - public InvalidSurlRequestDataAttributesException(TSURL SURL, - TReturnStatus status) { - - super(); - init(SURL, status); - } - - public InvalidSurlRequestDataAttributesException(TSURL SURL, - TReturnStatus status, String message) { - - super(message); - init(SURL, status); - } - - public InvalidSurlRequestDataAttributesException(TSURL SURL, - TReturnStatus status, Throwable cause) { - - super(cause); - init(SURL, status); - } - - public InvalidSurlRequestDataAttributesException(TSURL SURL, - TReturnStatus status, String message, Throwable cause) { - - super(message, cause); - init(SURL, status); - } - - private void init(TSURL SURL, TReturnStatus status) { - - nullSURL = SURL == null; - nullStatus = status == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("InvalidSurlRequestDataAttributesException [nullSURL="); - builder.append(nullSURL); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append("]"); - return builder.toString(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/JiTData.java 
b/src/main/java/it/grid/storm/catalogs/JiTData.java deleted file mode 100644 index 4c3e4eaee..000000000 --- a/src/main/java/it/grid/storm/catalogs/JiTData.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -/** - * Class that represents data associated to JiT entries. It contains a String - * representing the file, an int representing the ACL, an int representing the - * user UID, an int representing the user GID. - * - * @author EGRID - ICTP Trieste - * @version 1.0 - * @date November 2006 - */ -public class JiTData { - - private String file = ""; - private int uid = -1; - private int gid = -1; - private int acl = -1; - - /** - * Constructor requiring the complete name of the file as String, the acl as - * int, the uid and primary gid of the LocalUser bith as int. 
- */ - public JiTData(String file, int acl, int uid, int gid) { - - this.file = file; - this.acl = acl; - this.uid = uid; - this.gid = gid; - } - - public String pfn() { - - return file; - } - - public int acl() { - - return acl; - } - - public int uid() { - - return uid; - } - - public int gid() { - - return gid; - } - - public String toString() { - - return "file=" + file + " acl=" + acl + " uid=" + uid + " gid=" + gid; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java b/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java deleted file mode 100644 index 134b13ff2..000000000 --- a/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; - -/** - * Class that represents an Exception thrown by the ReservedSpaceCatalog when it - * finds more than one row of data for the specified request. - * - * @author: EGRID ICTP - * @version: 1.0 - * @date: June 2005 - */ -public class MultipleDataEntriesException extends Exception { - - private static final long serialVersionUID = 427636739469695868L; - - private TRequestToken requestToken; - - /** - * Constructor tha trequires the attributes that caused the exception to be - * thrown. 
- */ - public MultipleDataEntriesException(TRequestToken requestToken) { - - this.requestToken = requestToken; - } - - public String toString() { - - return "MultipleDataEntriesException: requestToken=" + requestToken; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java b/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java deleted file mode 100644 index bc44544a9..000000000 --- a/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; - -/** - * Class that represents an Exception thrown by the ReservedSpaceCatalog when it - * finds no data for the specified request. - * - * @author: EGRID ICTP - * @version: 1.0 - * @date: June 2005 - */ -public class NoDataFoundException extends Exception { - - private static final long serialVersionUID = -718255813130266566L; - - private TRequestToken requestToken; - - /** - * Constructor tha trequires the attributes that caused the exception to be - * thrown. 
- */ - public NoDataFoundException(TRequestToken requestToken) { - - this.requestToken = requestToken; - } - - public String toString() { - - return "NoDataFoundException: requestToken=" + requestToken; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java b/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java deleted file mode 100644 index ddcf6eda6..000000000 --- a/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.config.Configuration; - -/** - * Package private auxiliary class used to convert between DB and StoRM object - * model representation of TOverwriteMode. 
- * - * @author: EGRID ICTP - * @version: 2.0 - * @date: June 2005 - */ -public class OverwriteModeConverter { - - private Map DBtoSTORM = new HashMap(); - private Map STORMtoDB = new HashMap(); - - private static OverwriteModeConverter c = new OverwriteModeConverter(); - - /** - * Private constructor that fills in the conversion table; in particular, DB - * uses String values to represent TOverwriteMode: - * - * N NEVER A ALWAYS D WHENFILESAREDIFFERENT - */ - private OverwriteModeConverter() { - - DBtoSTORM.put("N", TOverwriteMode.NEVER); - DBtoSTORM.put("A", TOverwriteMode.ALWAYS); - DBtoSTORM.put("D", TOverwriteMode.WHENFILESAREDIFFERENT); - Object aux; - for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) { - aux = i.next(); - STORMtoDB.put(DBtoSTORM.get(aux), aux); - } - } - - /** - * Method that returns the only instance of OverwriteModeConverter. - */ - public static OverwriteModeConverter getInstance() { - - return c; - } - - /** - * Method that returns the int used by DPM to represent the given - * TOverwriteMode. "" is returned if no match is found. - */ - public String toDB(TOverwriteMode om) { - - String aux = (String) STORMtoDB.get(om); - if (aux == null) - return ""; - return aux; - } - - /** - * Method that returns the TOverwriteMode used by StoRM to represent the - * supplied String representation of DPM. A configured default TOverwriteMode - * is returned in case no corresponding StoRM type is found. - * TOverwriteMode.EMPTY is returned if there are configuration errors. 
- */ - public TOverwriteMode toSTORM(String s) { - - TOverwriteMode aux = (TOverwriteMode) DBtoSTORM.get(s); - if (aux == null) - aux = (TOverwriteMode) DBtoSTORM.get(Configuration.getInstance() - .getDefaultOverwriteMode()); - if (aux == null) - return TOverwriteMode.EMPTY; - else - return aux; - } - - public String toString() { - - return "OverWriteModeConverter.\nDBtoSTORM map:" + DBtoSTORM - + "\nSTORMtoDB map:" + STORMtoDB; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/PersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/PersistentChunkData.java deleted file mode 100644 index 59f59d81e..000000000 --- a/src/main/java/it/grid/storm/catalogs/PersistentChunkData.java +++ /dev/null @@ -1,11 +0,0 @@ -package it.grid.storm.catalogs; - -public interface PersistentChunkData extends ChunkData { - - /** - * Method that returns the primary key in persistence, associated with This - * Chunk. - */ - public long getPrimaryKey(); - -} diff --git a/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java b/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java deleted file mode 100644 index 1904e57a2..000000000 --- a/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.config.Configuration; - -/** - * Class that handles DB representation of a TLifetimeInSeconds, in particular - * it takes care of protocol specification: - * - * 0/null/negative are translated as default StoRM configurable values. StoRMs - * Empty TLifeTimeInSeconds is translated as 0. - * - * @author EGRID ICTP - * @version 1.0 - * @date March 2007 - */ -public class PinLifetimeConverter { - - private static PinLifetimeConverter stc = new PinLifetimeConverter(); // only - // instance - - private PinLifetimeConverter() { - - } - - /** - * Method that returns the only instance of SizeInBytesIntConverter - */ - public static PinLifetimeConverter getInstance() { - - return stc; - } - - /** - * Method that translates the Empty TLifeTimeInSeconds into the empty - * representation of DB which is 0. Any other value is left as is. - */ - public int toDB(long l) { - - if (l == TLifeTimeInSeconds.makeEmpty().value()) - return 0; - return new Long(l).intValue(); - } - - /** - * Method that returns the long corresponding to the int value in the DB, - * except if it is 0, NULL or negative; a configurable default value is - * returned instead, corresponding to the getPinLifetimeMinimum() - * Configuration class method. 
- */ - public long toStoRM(int s) { - - if (s == 0) { - return Configuration.getInstance().getPinLifetimeDefault(); - } else if (s < 0) { - // The default is used also as a Minimum - return Configuration.getInstance().getPinLifetimeDefault(); - } - return new Integer(s).longValue(); - } - - public long toStoRM(long s) { - - if (s == 0) { - return Configuration.getInstance().getPinLifetimeDefault(); - } else if (s < 0) { - // The default is used also as a Minimum - return Configuration.getInstance().getPinLifetimeDefault(); - } - return s; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java index 307ed1a19..16daf5304 100644 --- a/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java @@ -17,6 +17,12 @@ package it.grid.storm.catalogs; +import java.util.ArrayList; +import java.util.Collection; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.common.types.SizeUnit; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.common.types.TimeUnit; @@ -24,11 +30,21 @@ import it.grid.storm.griduser.AbstractGridUser; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; +import it.grid.storm.persistence.converter.PinLifetimeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TURLConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.persistence.dao.PtGChunkDAO; +import it.grid.storm.persistence.exceptions.InvalidReducedPtGChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.impl.mysql.PtGChunkDAOMySql; +import it.grid.storm.persistence.model.PtGChunkDataTO; +import it.grid.storm.persistence.model.PtGPersistentChunkData; 
+import it.grid.storm.persistence.model.ReducedPtGChunkData; +import it.grid.storm.persistence.model.ReducedPtGChunkDataTO; import it.grid.storm.srm.types.InvalidTDirOptionAttributesException; -import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.InvalidTSizeAttributesException; -import it.grid.storm.srm.types.InvalidTTURLAttributesException; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TRequestToken; @@ -38,820 +54,349 @@ import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.srm.types.TTURL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * Class that represents StoRMs PtGChunkCatalog: it collects PtGChunkData and - * provides methods for looking up a PtGChunkData based on TRequestToken, as - * well as for adding a new entry and removing an existing one. + * Class that represents StoRMs PtGChunkCatalog: it collects PtGChunkData and provides methods for + * looking up a PtGChunkData based on TRequestToken, as well as for adding a new entry and removing + * an existing one. * * @author EGRID - ICTP Trieste * @date April 26th, 2005 * @version 4.0 */ -@SuppressWarnings("unused") public class PtGChunkCatalog { - private static final Logger log = LoggerFactory - .getLogger(PtGChunkCatalog.class); - - /* Only instance of PtGChunkCatalog present in StoRM! */ - private static final PtGChunkCatalog cat = new PtGChunkCatalog(); - private final PtGChunkDAO dao = PtGChunkDAO.getInstance(); - - /* - * Timer object in charge of transiting expired requests from SRM_FILE_PINNED - * to SRM_RELEASED! - */ - private final Timer transiter = new Timer(); - /* Delay time before starting cleaning thread! 
*/ - private final long delay = Configuration.getInstance() - .getTransitInitialDelay() * 1000; - /* Period of execution of cleaning! */ - private final long period = Configuration.getInstance() - .getTransitTimeInterval() * 1000; - - /** - * Private constructor that starts the internal timer needed to periodically - * check and transit requests whose pinLifetime has expired and are in - * SRM_FILE_PINNED, to SRM_RELEASED. - */ - private PtGChunkCatalog() { - - TimerTask transitTask = new TimerTask() { - - @Override - public void run() { - - transitExpiredSRM_FILE_PINNED(); - } - }; - transiter.scheduleAtFixedRate(transitTask, delay, period); - } - - /** - * Method that returns the only instance of PtGChunkCatalog available. - */ - public static PtGChunkCatalog getInstance() { - - return cat; - } - - /** - * Method used to update into Persistence a retrieved PtGChunkData. In case - * any error occurs, the operation does not proceed but no Exception is - * thrown. Error messages get logged. - * - * Only fileSize, StatusCode, errString and transferURL are updated. Likewise - * for the request pinLifetime. 
- */ - synchronized public void update(PtGPersistentChunkData chunkData) { - - PtGChunkDataTO to = new PtGChunkDataTO(); - /* Primary key needed by DAO Object */ - to.setPrimaryKey(chunkData.getPrimaryKey()); - to.setFileSize(chunkData.getFileSize().value()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setTurl(TURLConverter.getInstance().toDB( - chunkData.getTransferURL().toString())); - to.setLifeTime(PinLifetimeConverter.getInstance().toDB( - chunkData.getPinLifeTime().value())); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - to.setClientDN(chunkData.getUser().getDn()); - if (chunkData.getUser() instanceof AbstractGridUser) { - if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { - to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()) - .getFQANsAsString()); - } - - } - dao.update(to); - } - - /** - * Refresh method. THIS IS A WORK IN PROGRESS!!!! This method have to synch - * the ChunkData information with the database status intended as the status - * code and the TURL - * - * @param auxTO - * @param PtGChunkData - * inputChunk - * @return PtGChunkData outputChunk - */ - synchronized public PtGPersistentChunkData refreshStatus( - PtGPersistentChunkData inputChunk) { - - PtGChunkDataTO chunkDataTO = dao.refresh(inputChunk.getPrimaryKey()); - - log.debug("PtG CHUNK CATALOG: retrieved data " + chunkDataTO); - if (chunkDataTO == null) { - log.warn("PtG CHUNK CATALOG! Empty TO found in persistence for specified " - + "request: {}", inputChunk.getPrimaryKey()); - return inputChunk; - } - - /* - * In this first version the only field updated is the Status. 
Once - * updated, the new status is rewritten into the input ChunkData - */ - - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM(chunkDataTO.status()); - if (code != TStatusCode.EMPTY) { - status = new TReturnStatus(code, chunkDataTO.errString()); - } - inputChunk.setStatus(status); - TTURL turl = null; - try { - turl = TTURL.makeFromString(chunkDataTO.turl()); - } catch (InvalidTTURLAttributesException e) { - log.info("PtGChunkCatalog (FALSE-ERROR-in-abort-refresh-status?):" - + " built a TURL with protocol NULL (retrieved from the DB..)"); - } - inputChunk.setTransferURL(turl); - return inputChunk; - } - - /** - * Method that returns a Collection of PtGChunkData Objects matching the - * supplied TRequestToken. - * - * If any of the data associated to the TRequestToken is not well formed and - * so does not allow a PtGChunkData Object to be created, then that part of - * the request is dropped and gets logged, and the processing continues with - * the next part. All valid chunks get returned: the others get dropped. - * - * If there are no chunks to process then an empty Collection is returned, and - * a messagge gets logged. - */ - synchronized public Collection lookup(TRequestToken rt) { - - Collection chunkTOs = dao.find(rt); - log.debug("PtG CHUNK CATALOG: retrieved data " + chunkTOs); - ArrayList list = new ArrayList(); - if (chunkTOs.isEmpty()) { - log.warn("PtG CHUNK CATALOG! No chunks found in persistence for " - + "specified request: {}", rt); - return list; - } - PtGPersistentChunkData chunk; - for (PtGChunkDataTO chunkTO : chunkTOs) { - chunk = makeOne(chunkTO, rt); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(this.completeTO(chunkTO, chunk)); - } catch (InvalidReducedPtGChunkDataAttributesException e) { - log.warn("PtG CHUNK CATALOG! 
unable to add missing informations on DB " - + "to the request: {}", e.getMessage()); - } - } - log.debug("PtG CHUNK CATALOG: returning " + list); - return list; - } - - /** - * Generates a PtGChunkData from the received PtGChunkDataTO - * - * @param chunkDataTO - * @param rt - * @return - */ - private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkDataTO, - TRequestToken rt) { - - StringBuilder errorSb = new StringBuilder(); - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (chunkDataTO.normalizedStFN() != null) { - fromSURL.setNormalizedStFN(chunkDataTO.normalizedStFN()); - } - if (chunkDataTO.surlUniqueID() != null) { - fromSURL.setUniqueID(chunkDataTO.surlUniqueID().intValue()); - } - // lifeTime - TLifeTimeInSeconds lifeTime = null; - try { - long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM( - chunkDataTO.lifeTime()); - // Check for max value allowed - long max = Configuration.getInstance().getPinLifetimeMaximum(); - if (pinLifeTime > max) { - log.warn("PinLifeTime is greater than the max value allowed." - + " Drop the value to the max = {} seconds", max); - pinLifeTime = max; - } - lifeTime = TLifeTimeInSeconds.make((pinLifeTime), TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // dirOption - TDirOption dirOption = null; - try { - dirOption = new TDirOption(chunkDataTO.dirOption(), - chunkDataTO.allLevelRecursive(), chunkDataTO.numLevel()); - } catch (InvalidTDirOptionAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // transferProtocols - TURLPrefix transferProtocols = TransferProtocolListConverter - .toSTORM(chunkDataTO.protocolList()); - if (transferProtocols.size() == 0) { - errorSb.append("\nEmpty list of TransferProtocols or could " - + "not translate TransferProtocols!"); - /* fail construction of PtGChunkData! 
*/ - transferProtocols = null; - } - // fileSize - TSizeInBytes fileSize = null; - try { - fileSize = TSizeInBytes.make(chunkDataTO.fileSize(), SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - chunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + chunkDataTO.status()); - } else { - status = new TReturnStatus(code, chunkDataTO.errString()); - } - GridUserInterface gridUser = null; - try { - if (chunkDataTO.vomsAttributes() != null - && !chunkDataTO.vomsAttributes().trim().equals("")) { - gridUser = GridUserManager.makeVOMSGridUser(chunkDataTO.clientDN(), - chunkDataTO.vomsAttributesArray()); - } else { - gridUser = GridUserManager.makeGridUser(chunkDataTO.clientDN()); - } - - } catch (IllegalArgumentException e) { - log.error("Unexpected error on voms grid user creation." - + " IllegalArgumentException: {}", e.getMessage(), e); - } - // transferURL - /* - * whatever is read is just meaningless because PtG will fill it in!!! So - * create an Empty TTURL by default! Vital to avoid problems with unknown - * DPM NULL/EMPTY logic policy! - */ - TTURL transferURL = TTURL.makeEmpty(); - // make PtGChunkData - PtGPersistentChunkData aux = null; - try { - aux = new PtGPersistentChunkData(gridUser, rt, fromSURL, lifeTime, - dirOption, transferProtocols, fileSize, status, transferURL); - aux.setPrimaryKey(chunkDataTO.primaryKey()); - } catch (InvalidSurlRequestDataAttributesException e) { - dao.signalMalformedPtGChunk(chunkDataTO); - log.warn("PtG CHUNK CATALOG! Retrieved malformed PtG chunk data from " - + "persistence. Dropping chunk from request {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... 
- return aux; - } - - /** - * - * Adds to the received PtGChunkDataTO the normalized StFN and the SURL unique - * ID taken from the PtGChunkData - * - * @param chunkTO - * @param chunk - */ - private void completeTO(ReducedPtGChunkDataTO chunkTO, - final ReducedPtGChunkData chunk) { - - chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN()); - chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId())); - } - - /** - * - * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and - * completes it with the normalized StFN and the SURL unique ID taken from the - * PtGChunkData - * - * @param chunkTO - * @param chunk - * @return - * @throws InvalidReducedPtGChunkDataAttributesException - */ - private ReducedPtGChunkDataTO completeTO(PtGChunkDataTO chunkTO, - final PtGPersistentChunkData chunk) - throws InvalidReducedPtGChunkDataAttributesException { - - ReducedPtGChunkDataTO reducedChunkTO = this.reduce(chunkTO); - this.completeTO(reducedChunkTO, this.reduce(chunk)); - return reducedChunkTO; - } - - /** - * Creates a ReducedPtGChunkData from the data contained in the received - * PtGChunkData - * - * @param chunk - * @return - * @throws InvalidReducedPtGChunkDataAttributesException - */ - private ReducedPtGChunkData reduce(PtGPersistentChunkData chunk) - throws InvalidReducedPtGChunkDataAttributesException { - - ReducedPtGChunkData reducedChunk = new ReducedPtGChunkData(chunk.getSURL(), - chunk.getStatus()); - reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); - return reducedChunk; - } - - /** - * Creates a ReducedPtGChunkDataTO from the data contained in the received - * PtGChunkDataTO - * - * @param chunkTO - * @return - */ - private ReducedPtGChunkDataTO reduce(PtGChunkDataTO chunkTO) { - - ReducedPtGChunkDataTO reducedChunkTO = new ReducedPtGChunkDataTO(); - reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); - reducedChunkTO.setFromSURL(chunkTO.fromSURL()); - reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); - 
reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID()); - reducedChunkTO.setStatus(chunkTO.status()); - reducedChunkTO.setErrString(chunkTO.errString()); - return reducedChunkTO; - } - - /** - * Checks if the received PtGChunkDataTO contains the fields not set by the - * front end but required - * - * @param chunkTO - * @return - */ - private boolean isComplete(PtGChunkDataTO chunkTO) { - - return (chunkTO.normalizedStFN() != null) - && (chunkTO.surlUniqueID() != null); - } - - /** - * Checks if the received ReducedPtGChunkDataTO contains the fields not set by - * the front end but required - * - * @param reducedChunkTO - * @return - */ - private boolean isComplete(ReducedPtGChunkDataTO reducedChunkTO) { - - return (reducedChunkTO.normalizedStFN() != null) - && (reducedChunkTO.surlUniqueID() != null); - } - - /** - * Method that returns a Collection of ReducedPtGChunkData Objects associated - * to the supplied TRequestToken. - * - * If any of the data retrieved for a given chunk is not well formed and so - * does not allow a ReducedPtGChunkData Object to be created, then that chunk - * is dropped and gets logged, while processing continues with the next one. - * All valid chunks get returned: the others get dropped. - * - * If there are no chunks associated to the given TRequestToken, then an empty - * Collection is returned and a message gets logged. - */ - synchronized public Collection lookupReducedPtGChunkData( - TRequestToken rt) { - - Collection reducedChunkDataTOs = dao.findReduced(rt - .getValue()); - log.debug("PtG CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs); - ArrayList list = new ArrayList(); - if (reducedChunkDataTOs.isEmpty()) { - log.debug("PtG CHUNK CATALOG! 
No chunks found in persistence for {}", rt); - } else { - ReducedPtGChunkData reducedChunkData = null; - for (ReducedPtGChunkDataTO reducedChunkDataTO : reducedChunkDataTOs) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!this.isComplete(reducedChunkDataTO)) { - this.completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("PtG CHUNK CATALOG: returning {}", list); - } - return list; - } - - public Collection lookupReducedPtGChunkData( - TRequestToken requestToken, Collection surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.findReduced( - requestToken, surlsUniqueIDs, surlsArray); - log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildReducedChunkDataList(chunkDataTOCollection); - } - - public Collection lookupPtGChunkData(TSURL surl, - GridUserInterface user) { - - return lookupPtGChunkData(Arrays.asList(new TSURL[] { surl }), user); - } - - public Collection lookupPtGChunkData(TSURL surl) { - - return lookupPtGChunkData(Arrays.asList(new TSURL[] { surl })); - } - - public Collection lookupPtGChunkData( - List surls, GridUserInterface user) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.find(surlsUniqueIDs, - surlsArray, user.getDn()); - log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildChunkDataList(chunkDataTOCollection); - } - - public Collection lookupPtGChunkData(List 
surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.find(surlsUniqueIDs, - surlsArray); - log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildChunkDataList(chunkDataTOCollection); - } - - private Collection buildChunkDataList( - Collection chunkDataTOCollection) { - - ArrayList list = new ArrayList(); - PtGPersistentChunkData chunk; - for (PtGChunkDataTO chunkTO : chunkDataTOCollection) { - chunk = makeOne(chunkTO); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(this.completeTO(chunkTO, chunk)); - } catch (InvalidReducedPtGChunkDataAttributesException e) { - log.warn("PtG CHUNK CATALOG! unable to add missing informations on " - + "DB to the request: ", e.getMessage()); - } - } - return list; - } - - private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkTO) { - - try { - return makeOne(chunkTO, - new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp())); - } catch (InvalidTRequestTokenAttributesException e) { - throw new IllegalStateException( - "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " - + e); - } - } - - /** - * Method that returns a Collection of ReducedPtGChunkData Objects matching - * the supplied GridUser and Collection of TSURLs. If any of the data - * retrieved for a given chunk is not well formed and so does not allow a - * ReducedPtGChunkData Object to be created, then that chunk is dropped and - * gets logged, while processing continues with the next one. All valid chunks - * get returned: the others get dropped. 
If there are no chunks associated to - * the given GridUser and Collection of TSURLs, then an empty Collection is - * returned and a message gets logged. - */ - synchronized public Collection lookupReducedPtGChunkData( - GridUserInterface gu, Collection tsurlCollection) { - - int[] surlsUniqueIDs = new int[tsurlCollection.size()]; - String[] surls = new String[tsurlCollection.size()]; - int index = 0; - for (TSURL tsurl : tsurlCollection) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.findReduced( - gu.getDn(), surlsUniqueIDs, surls); - log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildReducedChunkDataList(chunkDataTOCollection); - } - - private Collection buildReducedChunkDataList( - Collection chunkDataTOCollection) { - - ArrayList list = new ArrayList(); - ReducedPtGChunkData reducedChunkData; - for (ReducedPtGChunkDataTO reducedChunkDataTO : chunkDataTOCollection) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!isComplete(reducedChunkDataTO)) { - completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("PtG CHUNK CATALOG: returning {}",list); - return list; - } - - /** - * - * - * @param reducedChunkDataTO - * @return - */ - private ReducedPtGChunkData makeOneReduced( - ReducedPtGChunkDataTO reducedChunkDataTO) { - - StringBuilder errorSb = new StringBuilder(); - // fromSURL - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.fromSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (reducedChunkDataTO.normalizedStFN() != null) { - fromSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN()); - } - if (reducedChunkDataTO.surlUniqueID() != null) { - fromSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue()); - } 
- // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - reducedChunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + reducedChunkDataTO.status()); - } else { - status = new TReturnStatus(code, reducedChunkDataTO.errString()); - } - // make ReducedPtGChunkData - ReducedPtGChunkData aux = null; - try { - aux = new ReducedPtGChunkData(fromSURL, status); - aux.setPrimaryKey(reducedChunkDataTO.primaryKey()); - } catch (InvalidReducedPtGChunkDataAttributesException e) { - log.warn("PtG CHUNK CATALOG! Retrieved malformed Reduced PtG chunk " - + "data from persistence: dropping reduced chunk..."); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... - return aux; - } - - /** - * Method used to add into Persistence a new entry. The supplied PtGChunkData - * gets the primary key changed to the value assigned in Persistence. - * - * This method is intended to be used by a recursive PtG request: the parent - * request supplies a directory which must be expanded, so all new children - * requests resulting from the files in the directory are added into - * persistence. - * - * So this method does _not_ add a new SRM prepare_to_get request into the DB! - * - * The only children data written into the DB are: sourceSURL, TDirOption, - * statusCode and explanation. - * - * In case of any error the operation does not proceed, but no Exception is - * thrown! Proper messages get logged by underlaying DAO. - */ - synchronized public void addChild(PtGPersistentChunkData chunkData) { - - PtGChunkDataTO to = new PtGChunkDataTO(); - /* needed for now to find ID of request! Must be changed soon! 
*/ - to.setRequestToken(chunkData.getRequestToken().toString()); - to.setFromSURL(chunkData.getSURL().toString()); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - - to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); - to.setDirOption(chunkData.getDirOption().isDirectory()); - to.setNumLevel(chunkData.getDirOption().getNumLevel()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setClientDN(chunkData.getUser().getDn()); - if (chunkData.getUser() instanceof AbstractGridUser) { - if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { - to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()) - .getFQANsAsString()); - } - - } - /* add the entry and update the Primary Key field! */ - dao.addChild(to); - /* set the assigned PrimaryKey! */ - chunkData.setPrimaryKey(to.primaryKey()); - } - - /** - * Method used to add into Persistence a new entry. The supplied PtGChunkData - * gets the primary key changed to the value assigned in the Persistence. The - * method requires the GridUser to whom associate the added request. - * - * This method is intended to be used by an srmCopy request in push mode which - * implies a local srmPtG. The only fields from PtGChunkData that are - * considered are: the requestToken, the sourceSURL, the pinLifetime, the - * dirOption, the protocolList, the status and error string. - * - * So this method _adds_ a new SRM prepare_to_get request into the DB! - * - * In case of any error the operation does not proceed, but no Exception is - * thrown! The underlaying DAO logs proper error messagges. 
- */ - synchronized public void add(PtGPersistentChunkData chunkData, - GridUserInterface gu) { - - PtGChunkDataTO to = new PtGChunkDataTO(); - to.setRequestToken(chunkData.getRequestToken().toString()); - to.setFromSURL(chunkData.getSURL().toString()); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - - to.setLifeTime(new Long(chunkData.getPinLifeTime().value()).intValue()); - to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); - to.setDirOption(chunkData.getDirOption().isDirectory()); - to.setNumLevel(chunkData.getDirOption().getNumLevel()); - to.setProtocolList(TransferProtocolListConverter.toDB(chunkData - .getTransferProtocols())); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - - to.setClientDN(chunkData.getUser().getDn()); - if (chunkData.getUser() instanceof AbstractGridUser) { - if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { - to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()) - .getFQANsAsString()); - } - - } - - dao.addNew(to, gu.getDn()); // add the entry and update the Primary Key - // field! - chunkData.setPrimaryKey(to.primaryKey()); // set the assigned PrimaryKey! - } - - /** - * Method used to establish if in Persistence there is a PtGChunkData working - * on the supplied SURL, and whose state is SRM_FILE_PINNED, in which case - * true is returned. In case none are found or there is any problem, false is - * returned. This method is intended to be used by srmMv. - */ - synchronized public boolean isSRM_FILE_PINNED(TSURL surl) { - - return (dao.numberInSRM_FILE_PINNED(surl.uniqueId()) > 0); - - } - - /** - * Method used to transit the specified Collection of ReducedPtGChunkData from - * SRM_FILE_PINNED to SRM_RELEASED. Chunks in any other starting state are not - * transited. 
In case of any error nothing is done, but proper error messages - * get logged by the underlaying DAO. - */ - synchronized public void transitSRM_FILE_PINNEDtoSRM_RELEASED( - Collection chunks, TRequestToken token) { - - if (chunks == null || chunks.isEmpty()) { - return; - } - long[] primaryKeys = new long[chunks.size()]; - int index = 0; - for (ReducedPtGChunkData chunkData : chunks) { - if (chunkData != null) { - primaryKeys[index] = chunkData.primaryKey(); - index++; - } - - } - dao.transitSRM_FILE_PINNEDtoSRM_RELEASED(primaryKeys, token); - for (ReducedPtGChunkData chunkData : chunks) { - if (chunkData != null) { - primaryKeys[index] = chunkData.primaryKey(); - index++; - } - } - } - - /** - * Method used to force transition to SRM_RELEASED from SRM_FILE_PINNED, of - * all PtG Requests whose pinLifetime has expired and the state still has not - * been changed (a user forgot to run srmReleaseFiles)! - */ - synchronized public void transitExpiredSRM_FILE_PINNED() { - - List expiredSurls = dao.transitExpiredSRM_FILE_PINNED(); - } - - public void updateStatus(TRequestToken requestToken, TSURL surl, - TStatusCode statusCode, String explanation) { - - dao.updateStatus(requestToken, new int[] { surl.uniqueId() }, - new String[] { surl.rawSurl() }, statusCode, explanation); - } - - public void updateFromPreviousStatus(TSURL surl, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateStatusOnMatchingStatus(new int[] { surl.uniqueId() }, - new String[] { surl.rawSurl() }, expectedStatusCode, newStatusCode, - explanation); - - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, - newStatusCode, explanation); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - List surlList, TStatusCode expectedStatusCode, - TStatusCode 
newStatusCode) { - - int[] surlsUniqueIDs = new int[surlList.size()]; - String[] surls = new String[surlList.size()]; - int index = 0; - for (TSURL tsurl : surlList) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode); - } + private static final Logger log = LoggerFactory.getLogger(PtGChunkCatalog.class); + + private static PtGChunkCatalog instance; + + public static synchronized PtGChunkCatalog getInstance() { + if (instance == null) { + instance = new PtGChunkCatalog(); + } + return instance; + } + + private final PtGChunkDAO dao; + + /** + * Private constructor that starts the internal timer needed to periodically check and transit + * requests whose pinLifetime has expired and are in SRM_FILE_PINNED, to SRM_RELEASED. + */ + private PtGChunkCatalog() { + + dao = PtGChunkDAOMySql.getInstance(); + } + + /** + * Method used to update into Persistence a retrieved PtGChunkData. In case any error occurs, the + * operation does not proceed but no Exception is thrown. Error messages get logged. + * + * Only fileSize, StatusCode, errString and transferURL are updated. Likewise for the request + * pinLifetime. 
+ */ + synchronized public void update(PtGPersistentChunkData chunkData) { + + PtGChunkDataTO to = new PtGChunkDataTO(); + /* Primary key needed by DAO Object */ + to.setPrimaryKey(chunkData.getPrimaryKey()); + to.setFileSize(chunkData.getFileSize().value()); + to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode())); + to.setErrString(chunkData.getStatus().getExplanation()); + to.setTurl(TURLConverter.getInstance().toDB(chunkData.getTransferURL().toString())); + to.setLifeTime(PinLifetimeConverter.getInstance().toDB(chunkData.getPinLifeTime().value())); + to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); + to.setSurlUniqueID(Integer.valueOf(chunkData.getSURL().uniqueId())); + to.setClientDN(chunkData.getUser().getDn()); + if (chunkData.getUser() instanceof AbstractGridUser) { + if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { + to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString()); + } + + } + dao.update(to); + } + + /** + * Method that returns a Collection of PtGChunkData Objects matching the supplied TRequestToken. + * + * If any of the data associated to the TRequestToken is not well formed and so does not allow a + * PtGChunkData Object to be created, then that part of the request is dropped and gets logged, + * and the processing continues with the next part. All valid chunks get returned: the others get + * dropped. + * + * If there are no chunks to process then an empty Collection is returned, and a messagge gets + * logged. + */ + synchronized public Collection lookup(TRequestToken rt) { + + Collection chunkTOs = dao.find(rt); + log.debug("PtG CHUNK CATALOG: retrieved data " + chunkTOs); + ArrayList list = new ArrayList(); + if (chunkTOs.isEmpty()) { + log.warn("PtG CHUNK CATALOG! 
No chunks found in persistence for " + "specified request: {}", + rt); + return list; + } + PtGPersistentChunkData chunk; + for (PtGChunkDataTO chunkTO : chunkTOs) { + chunk = makeOne(chunkTO, rt); + if (chunk == null) { + continue; + } + list.add(chunk); + if (isComplete(chunkTO)) { + continue; + } + try { + dao.updateIncomplete(this.completeTO(chunkTO, chunk)); + } catch (InvalidReducedPtGChunkDataAttributesException e) { + log.warn( + "PtG CHUNK CATALOG! unable to add missing informations on DB " + "to the request: {}", + e.getMessage()); + } + } + log.debug("PtG CHUNK CATALOG: returning " + list); + return list; + } + + /** + * Generates a PtGChunkData from the received PtGChunkDataTO + * + * @param chunkDataTO + * @param rt + * @return + */ + private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkDataTO, TRequestToken rt) { + + StringBuilder errorSb = new StringBuilder(); + TSURL fromSURL = null; + try { + fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL()); + } catch (InvalidTSURLAttributesException e) { + errorSb.append(e); + } + if (chunkDataTO.normalizedStFN() != null) { + fromSURL.setNormalizedStFN(chunkDataTO.normalizedStFN()); + } + if (chunkDataTO.surlUniqueID() != null) { + fromSURL.setUniqueID(chunkDataTO.surlUniqueID().intValue()); + } + // lifeTime + TLifeTimeInSeconds lifeTime = null; + try { + long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(chunkDataTO.lifeTime()); + // Check for max value allowed + long max = Configuration.getInstance().getPinLifetimeMaximum(); + if (pinLifeTime > max) { + log.warn("PinLifeTime is greater than the max value allowed." 
+ + " Drop the value to the max = {} seconds", max); + pinLifeTime = max; + } + lifeTime = TLifeTimeInSeconds.make((pinLifeTime), TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // dirOption + TDirOption dirOption = null; + try { + dirOption = new TDirOption(chunkDataTO.dirOption(), chunkDataTO.allLevelRecursive(), + chunkDataTO.numLevel()); + } catch (InvalidTDirOptionAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // transferProtocols + TURLPrefix transferProtocols = + TransferProtocolListConverter.toSTORM(chunkDataTO.protocolList()); + if (transferProtocols.size() == 0) { + errorSb + .append("\nEmpty list of TransferProtocols or could " + "not translate TransferProtocols!"); + /* fail construction of PtGChunkData! */ + transferProtocols = null; + } + // fileSize + TSizeInBytes fileSize = null; + try { + fileSize = TSizeInBytes.make(chunkDataTO.fileSize(), SizeUnit.BYTES); + } catch (InvalidTSizeAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // status + TReturnStatus status = null; + TStatusCode code = StatusCodeConverter.getInstance().toSTORM(chunkDataTO.status()); + if (code == TStatusCode.EMPTY) { + errorSb.append("\nRetrieved StatusCode was not recognised: " + chunkDataTO.status()); + } else { + status = new TReturnStatus(code, chunkDataTO.errString()); + } + GridUserInterface gridUser = null; + try { + if (chunkDataTO.vomsAttributes() != null && !chunkDataTO.vomsAttributes().trim().equals("")) { + gridUser = GridUserManager.makeVOMSGridUser(chunkDataTO.clientDN(), + chunkDataTO.vomsAttributesArray()); + } else { + gridUser = GridUserManager.makeGridUser(chunkDataTO.clientDN()); + } + + } catch (IllegalArgumentException e) { + log.error("Unexpected error on voms grid user creation." + " IllegalArgumentException: {}", + e.getMessage(), e); + } + // transferURL + /* + * whatever is read is just meaningless because PtG will fill it in!!! 
So create an Empty TTURL + * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy! + */ + TTURL transferURL = TTURL.makeEmpty(); + // make PtGChunkData + PtGPersistentChunkData aux = null; + try { + aux = new PtGPersistentChunkData(gridUser, rt, fromSURL, lifeTime, dirOption, + transferProtocols, fileSize, status, transferURL); + aux.setPrimaryKey(chunkDataTO.primaryKey()); + } catch (InvalidSurlRequestDataAttributesException e) { + dao.fail(chunkDataTO); + log.warn("PtG CHUNK CATALOG! Retrieved malformed PtG chunk data from " + + "persistence. Dropping chunk from request {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } + // end... + return aux; + } + + /** + * + * Adds to the received PtGChunkDataTO the normalized StFN and the SURL unique ID taken from the + * PtGChunkData + * + * @param chunkTO + * @param chunk + */ + private void completeTO(ReducedPtGChunkDataTO chunkTO, final ReducedPtGChunkData chunk) { + + chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN()); + chunkTO.setSurlUniqueID(Integer.valueOf(chunk.fromSURL().uniqueId())); + } + + /** + * + * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and completes it with the + * normalized StFN and the SURL unique ID taken from the PtGChunkData + * + * @param chunkTO + * @param chunk + * @return + * @throws InvalidReducedPtGChunkDataAttributesException + */ + private ReducedPtGChunkDataTO completeTO(PtGChunkDataTO chunkTO, + final PtGPersistentChunkData chunk) throws InvalidReducedPtGChunkDataAttributesException { + + ReducedPtGChunkDataTO reducedChunkTO = this.reduce(chunkTO); + this.completeTO(reducedChunkTO, this.reduce(chunk)); + return reducedChunkTO; + } + + /** + * Creates a ReducedPtGChunkData from the data contained in the received PtGChunkData + * + * @param chunk + * @return + * @throws InvalidReducedPtGChunkDataAttributesException + */ + private ReducedPtGChunkData reduce(PtGPersistentChunkData chunk) + throws 
InvalidReducedPtGChunkDataAttributesException { + + ReducedPtGChunkData reducedChunk = new ReducedPtGChunkData(chunk.getSURL(), chunk.getStatus()); + reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); + return reducedChunk; + } + + /** + * Creates a ReducedPtGChunkDataTO from the data contained in the received PtGChunkDataTO + * + * @param chunkTO + * @return + */ + private ReducedPtGChunkDataTO reduce(PtGChunkDataTO chunkTO) { + + ReducedPtGChunkDataTO reducedChunkTO = new ReducedPtGChunkDataTO(); + reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); + reducedChunkTO.setFromSURL(chunkTO.fromSURL()); + reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); + reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID()); + reducedChunkTO.setStatus(chunkTO.status()); + reducedChunkTO.setErrString(chunkTO.errString()); + return reducedChunkTO; + } + + /** + * Checks if the received PtGChunkDataTO contains the fields not set by the front end but required + * + * @param chunkTO + * @return + */ + private boolean isComplete(PtGChunkDataTO chunkTO) { + + return (chunkTO.normalizedStFN() != null) && (chunkTO.surlUniqueID() != null); + } + + /** + * Method used to add into Persistence a new entry. The supplied PtGChunkData gets the primary key + * changed to the value assigned in Persistence. + * + * This method is intended to be used by a recursive PtG request: the parent request supplies a + * directory which must be expanded, so all new children requests resulting from the files in the + * directory are added into persistence. + * + * So this method does _not_ add a new SRM prepare_to_get request into the DB! + * + * The only children data written into the DB are: sourceSURL, TDirOption, statusCode and + * explanation. + * + * In case of any error the operation does not proceed, but no Exception is thrown! Proper + * messages get logged by underlaying DAO. 
+ */ + synchronized public void addChild(PtGPersistentChunkData chunkData) { + + PtGChunkDataTO to = new PtGChunkDataTO(); + /* needed for now to find ID of request! Must be changed soon! */ + to.setRequestToken(chunkData.getRequestToken().toString()); + to.setFromSURL(chunkData.getSURL().toString()); + to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); + to.setSurlUniqueID(Integer.valueOf(chunkData.getSURL().uniqueId())); + + to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); + to.setDirOption(chunkData.getDirOption().isDirectory()); + to.setNumLevel(chunkData.getDirOption().getNumLevel()); + to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode())); + to.setErrString(chunkData.getStatus().getExplanation()); + to.setClientDN(chunkData.getUser().getDn()); + if (chunkData.getUser() instanceof AbstractGridUser) { + if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { + to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString()); + } + + } + /* add the entry and update the Primary Key field! */ + dao.addChild(to); + /* set the assigned PrimaryKey! 
*/ + chunkData.setPrimaryKey(to.primaryKey()); + } + + public void updateStatus(TRequestToken requestToken, TSURL surl, TStatusCode statusCode, + String explanation) { + + dao.updateStatus(requestToken, new int[] {surl.uniqueId()}, new String[] {surl.rawSurl()}, + statusCode, explanation); + } + + public void updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation) { + + dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode, explanation); + } } diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java b/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java deleted file mode 100644 index 2db12ab2a..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java +++ /dev/null @@ -1,1778 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import static it.grid.storm.catalogs.ChunkDAOUtils.printWarnings; - -import it.grid.storm.config.Configuration; -import it.grid.storm.ea.StormEA; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.naming.SURL; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map.Entry; -import java.util.Timer; -import java.util.TimerTask; - -/** - * DAO class for PtGChunkCatalog. This DAO is specifically designed to connect - * to a MySQL DB. The raw data found in those tables is pre-treated in order to - * turn it into the Object Model of StoRM. See Method comments for further info. - * - * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the - * object model. 
- * - * @author EGRID ICTP - * @version 3.0 - * @date June 2005 - */ -public class PtGChunkDAO { - - private static final Logger log = LoggerFactory.getLogger(PtGChunkDAO.class); - - /** String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /** String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getStormDbURL(); - /** String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /** String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - - /** Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - /** boolean that tells whether reconnection is needed because of MySQL bug! */ - private boolean reconnect = false; - - /** Singleton instance */ - private final static PtGChunkDAO dao = new PtGChunkDAO(); - - /** timer thread that will run a task to alert when reconnecting is necessary! */ - private Timer clock = null; - /** - * timer task that will update the boolean signaling that a reconnection is - * needed! - */ - private TimerTask clockTask = null; - /** milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance() - .getDBReconnectPeriod() * 1000; - /** initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - - private PtGChunkDAO() { - - setUpConnection(); - - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the PtGChunkDAO. 
- */ - public static PtGChunkDAO getInstance() { - - return dao; - } - - /** - * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets - * its primaryKey changed to the one assigned by the DB. - * - * The supplied PtGChunkData is used to fill in only the DB table where file - * specific info gets recorded: it does _not_ add a new request! So if - * spurious data is supplied, it will just stay there because of a lack of a - * parent request! - */ - public synchronized void addChild(PtGChunkDataTO to) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: addChild - unable to get a valid connection!"); - return; - } - String str = null; - PreparedStatement id = null; // statement to find out the ID associated to - // the request token - ResultSet rsid = null; // result set containing the ID of the request. - try { - - // WARNING!!!! We are forced to run a query to get the ID of the request, - // which should NOT be so - // because the corresponding request object should have been changed with - // the extra field! However, it is not possible - // at the moment to perform such chage because of strict deadline and the - // change could wreak havoc - // the code. So we are forced to make this query!!! - - // begin transaction - con.setAutoCommit(false); - printWarnings(con.getWarnings()); - - // find ID of request corresponding to given RequestToken - str = "SELECT rq.ID FROM request_queue rq WHERE rq.r_token=?"; - - id = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - id.setString(1, to.requestToken()); - printWarnings(id.getWarnings()); - - log.debug("PTG CHUNK DAO: addChild; {}", id.toString()); - rsid = id.executeQuery(); - printWarnings(id.getWarnings()); - - /* ID of request in request_process! */ - int request_id = extractID(rsid); - int id_s = fillPtGTables(to, request_id); - - /* end transaction! 
*/ - con.commit(); - printWarnings(con.getWarnings()); - con.setAutoCommit(true); - printWarnings(con.getWarnings()); - - // update primary key reading the generated key - to.setPrimaryKey(id_s); - } catch (SQLException e) { - log.error("PTG CHUNK DAO: unable to complete addChild! " - + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); - rollback(con); - } catch (Exception e) { - log.error("PTG CHUNK DAO: unable to complete addChild! " - + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); - rollback(con); - } finally { - close(rsid); - close(id); - } - } - - /** - * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets - * its primaryKey changed to the one assigned by the DB. The client_dn must - * also be supplied as a String. - * - * The supplied PtGChunkData is used to fill in all the DB tables where file - * specific info gets recorded: it _adds_ a new request! - */ - public synchronized void addNew(PtGChunkDataTO to, String client_dn) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: addNew - unable to get a valid connection!"); - return; - } - String str = null; - /* Result set containing the ID of the inserted new request */ - ResultSet rs_new = null; - /* Insert new request into process_request */ - PreparedStatement addNew = null; - /* Insert protocols for request. */ - PreparedStatement addProtocols = null; - try { - // begin transaction - con.setAutoCommit(false); - printWarnings(con.getWarnings()); - - // add to request_queue... - str = "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp) VALUES (?,?,?,?,?,?,?,?)"; - addNew = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - printWarnings(con.getWarnings()); - /* Request type set to prepare to get! 
*/ - addNew.setString(1, - RequestTypeConverter.getInstance().toDB(TRequestType.PREPARE_TO_GET)); - printWarnings(addNew.getWarnings()); - - addNew.setString(2, client_dn); - printWarnings(addNew.getWarnings()); - - addNew.setInt(3, to.lifeTime()); - printWarnings(addNew.getWarnings()); - - addNew.setInt( - 4, - StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_INPROGRESS)); - printWarnings(addNew.getWarnings()); - - addNew.setString(5, "New PtG Request resulting from srmCopy invocation."); - printWarnings(addNew.getWarnings()); - - addNew.setString(6, to.requestToken()); - printWarnings(addNew.getWarnings()); - - addNew.setInt(7, 1); // number of requested files set to 1! - printWarnings(addNew.getWarnings()); - - addNew.setTimestamp(8, new Timestamp(new Date().getTime())); - printWarnings(addNew.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addNew.toString()); - addNew.execute(); - printWarnings(addNew.getWarnings()); - - rs_new = addNew.getGeneratedKeys(); - int id_new = extractID(rs_new); - - // add protocols... - str = "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) VALUES (?,?)"; - addProtocols = con.prepareStatement(str); - printWarnings(con.getWarnings()); - for (Iterator i = to.protocolList().iterator(); i.hasNext();) { - addProtocols.setInt(1, id_new); - printWarnings(addProtocols.getWarnings()); - - addProtocols.setString(2, i.next()); - printWarnings(addProtocols.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addProtocols.toString()); - addProtocols.execute(); - printWarnings(addProtocols.getWarnings()); - } - - // addChild... - int id_s = fillPtGTables(to, id_new); - - // end transaction! - con.commit(); - printWarnings(con.getWarnings()); - con.setAutoCommit(true); - printWarnings(con.getWarnings()); - - // update primary key reading the generated key - to.setPrimaryKey(id_s); - } catch (SQLException e) { - log.error("PTG CHUNK DAO: Rolling back! Unable to complete addNew! 
" - + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); - rollback(con); - } catch (Exception e) { - log.error("PTG CHUNK DAO: unable to complete addNew! " - + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); - rollback(con); - } finally { - close(rs_new); - close(addNew); - close(addProtocols); - } - } - - /** - * To be used inside a transaction - * - * @param to - * @param requestQueueID - * @return - * @throws SQLException - * @throws Exception - */ - private synchronized int fillPtGTables(PtGChunkDataTO to, int requestQueueID) - throws SQLException, Exception { - - String str = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_do = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_g = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_s = null; - /* insert TDirOption for request */ - PreparedStatement addDirOption = null; - /* insert request_Get for request */ - PreparedStatement addGet = null; - PreparedStatement addChild = null; - - try { - // first fill in TDirOption - str = "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) VALUES (?,?,?)"; - addDirOption = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - printWarnings(con.getWarnings()); - addDirOption.setBoolean(1, to.dirOption()); - printWarnings(addDirOption.getWarnings()); - - addDirOption.setBoolean(2, to.allLevelRecursive()); - printWarnings(addDirOption.getWarnings()); - - addDirOption.setInt(3, to.numLevel()); - printWarnings(addDirOption.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addDirOption.toString()); - addDirOption.execute(); - printWarnings(addDirOption.getWarnings()); - - rs_do = addDirOption.getGeneratedKeys(); - int id_do = extractID(rs_do); - - // second fill in request_Get... sourceSURL and TDirOption! 
- str = "INSERT INTO request_Get (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) VALUES (?,?,?,?,?)"; - addGet = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - printWarnings(con.getWarnings()); - addGet.setInt(1, id_do); - printWarnings(addGet.getWarnings()); - - addGet.setInt(2, requestQueueID); - printWarnings(addGet.getWarnings()); - - addGet.setString(3, to.fromSURL()); - printWarnings(addGet.getWarnings()); - - addGet.setString(4, to.normalizedStFN()); - printWarnings(addGet.getWarnings()); - - addGet.setInt(5, to.surlUniqueID()); - printWarnings(addGet.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addGet.toString()); - addGet.execute(); - printWarnings(addGet.getWarnings()); - - rs_g = addGet.getGeneratedKeys(); - int id_g = extractID(rs_g); - - // third fill in status_Get... - str = "INSERT INTO status_Get (request_GetID,statusCode,explanation) VALUES (?,?,?)"; - addChild = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - printWarnings(con.getWarnings()); - addChild.setInt(1, id_g); - printWarnings(addChild.getWarnings()); - - addChild.setInt(2, to.status()); - printWarnings(addChild.getWarnings()); - - addChild.setString(3, to.errString()); - printWarnings(addChild.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addChild.toString()); - addChild.execute(); - printWarnings(addChild.getWarnings()); - - return id_g; - } finally { - close(rs_do); - close(rs_g); - close(rs_s); - close(addDirOption); - close(addGet); - close(addChild); - } - } - - /** - * Method used to save the changes made to a retrieved PtGChunkDataTO, back - * into the MySQL DB. - * - * Only the fileSize, transferURL, statusCode and explanation, of status_Get - * table are written to the DB. Likewise for the request pinLifetime. - * - * In case of any error, an error message gets logged but no exception is - * thrown. 
- */ - public synchronized void update(PtGChunkDataTO to) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: update - unable to get a valid connection!"); - return; - } - PreparedStatement updateFileReq = null; - try { - // ready updateFileReq... - updateFileReq = con - .prepareStatement("UPDATE request_queue rq JOIN (status_Get sg, request_Get rg) ON (rq.ID=rg.request_queueID AND sg.request_GetID=rg.ID) " - + "SET sg.fileSize=?, sg.transferURL=?, sg.statusCode=?, sg.explanation=?, rq.pinLifetime=?, rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? " - + "WHERE rg.ID=?"); - printWarnings(con.getWarnings()); - - updateFileReq.setLong(1, to.fileSize()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(2, to.turl()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(3, to.status()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(4, to.errString()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(5, to.lifeTime()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(6, to.normalizedStFN()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(7, to.surlUniqueID()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setLong(8, to.primaryKey()); - printWarnings(updateFileReq.getWarnings()); - // execute update - log.trace("PTG CHUNK DAO: update method; {}", updateFileReq.toString()); - updateFileReq.executeUpdate(); - printWarnings(updateFileReq.getWarnings()); - } catch (SQLException e) { - log.error("PtG CHUNK DAO: Unable to complete update! 
{}", - e.getMessage(), e); - } finally { - close(updateFileReq); - } - } - - /** - * Updates the request_Get represented by the received ReducedPtGChunkDataTO - * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID - * - * @param chunkTO - */ - public synchronized void updateIncomplete(ReducedPtGChunkDataTO chunkTO) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: updateIncomplete - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_Get rg SET rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? " - + "WHERE rg.ID=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - stmt.setString(1, chunkTO.normalizedStFN()); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, chunkTO.surlUniqueID()); - printWarnings(stmt.getWarnings()); - - stmt.setLong(3, chunkTO.primaryKey()); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - update incomplete: {}", stmt.toString()); - stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - } catch (SQLException e) { - log.error("PtG CHUNK DAO: Unable to complete update incomplete! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * TODO WARNING! THIS IS A WORK IN PROGRESS!!! - * - * Method used to refresh the PtGChunkDataTO information from the MySQL DB. - * - * In this first version, only the statusCode and the TURL are reloaded from - * the DB. TODO The next version must contains all the information related to - * the Chunk! - * - * In case of any error, an error messagge gets logged but no exception is - * thrown. 
- */ - - public synchronized PtGChunkDataTO refresh(long primary_key) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: refresh - unable to get a valid connection!"); - return null; - } - String queryString = null; - PreparedStatement find = null; - ResultSet rs = null; - - try { - // get chunks of the request - queryString = "SELECT sg.statusCode, sg.transferURL " - + "FROM status_Get sg " + "WHERE sg.request_GetID=?"; - find = con.prepareStatement(queryString); - printWarnings(con.getWarnings()); - find.setLong(1, primary_key); - printWarnings(find.getWarnings()); - log.trace("PTG CHUNK DAO: refresh status method; {}", find.toString()); - - rs = find.executeQuery(); - - printWarnings(find.getWarnings()); - PtGChunkDataTO chunkDataTO = null; - // The result shoul be un - while (rs.next()) { - chunkDataTO = new PtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setTurl(rs.getString("sg.transferURL")); - } - return chunkDataTO; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* Return null TransferObject! */ - return null; - } finally { - close(rs); - close(find); - } - } - - /** - * Method that queries the MySQL DB to find all entries matching the supplied - * TRequestToken. The Collection contains the corresponding PtGChunkDataTO - * objects. - * - * An initial simple query establishes the list of protocols associated with - * the request. A second complex query establishes all chunks associated with - * the request, by properly joining request_queue, request_Get, status_Get and - * request_DirOption. The considered fields are: - * - * (1) From status_Get: the ID field which becomes the TOs primary key, and - * statusCode. - * - * (2) From request_Get: sourceSURL - * - * (3) From request_queue: pinLifetime - * - * (4) From request_DirOption: isSourceADirectory, alLevelRecursive, - * numOfLevels - * - * In case of any error, a log gets written and an empty collection is - * returned. 
No exception is thrown. - * - * NOTE! Chunks in SRM_ABORTED status are NOT returned! - */ - public synchronized Collection find(TRequestToken requestToken) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - str = "SELECT tp.config_ProtocolsID " - + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " - + "WHERE rq.r_token=?"; - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List protocols = new ArrayList(); - find.setString(1, strToken); - printWarnings(find.getWarnings()); - - log.trace("PTG CHUNK DAO: find method; {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - close(rs); - close(find); - - // get chunks of the request - str = "SELECT sg.statusCode, rq.pinLifetime, rg.ID, rq.timeStamp, rq.client_dn, rq.proxy, rg.sourceSURL, " - + "rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID, d.isSourceADirectory, " - + "d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "LEFT JOIN request_DirOption d ON rg.request_DirOptionID=d.ID " - + "WHERE rq.r_token=? 
AND sg.statusCode<>?"; - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - ArrayList list = new ArrayList(); - find.setString(1, strToken); - printWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - printWarnings(find.getWarnings()); - - log.trace("PTG CHUNK DAO: find method; " + find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - PtGChunkDataTO chunkDataTO; - while (rs.next()) { - chunkDataTO = new PtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separeted by the "#" char. The proxy is a BLOB, hence it has to be - * properly conveted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); - chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); - chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); - chunkDataTO.setProtocolList(protocols); - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: ", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedPtGChunkDataTO associated to the - * given TRequestToken expressed as String. - */ - public synchronized Collection findReduced( - String reqtoken) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get reduced chunks - String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE rq.r_token=?"; - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, reqtoken); - printWarnings(find.getWarnings()); - - log.trace("PtG CHUNK DAO! 
findReduced with request token; {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtGChunkDataTO reducedChunkDataTO = null; - while (rs.next()) { - reducedChunkDataTO = new ReducedPtGChunkDataTO(); - reducedChunkDataTO.setStatus(rs.getInt("sg.statusCode")); - reducedChunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - reducedChunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - reducedChunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - reducedChunkDataTO.setSurlUniqueID(uniqueID); - } - - list.add(reducedChunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - public synchronized Collection findReduced( - TRequestToken requestToken, int[] surlsUniqueIDs, String[] surlsArray) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - - try { - - String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE rq.r_token=? AND ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rg.sourceSURL IN " - + makeSurlString(surlsArray) + " ) "; - - find = con.prepareStatement(str); - - printWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, requestToken.getValue()); - printWarnings(find.getWarnings()); - - log.trace("PtG CHUNK DAO! 
findReduced with griduser+surlarray; {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtGChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedPtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedPtGChunkDataTO associated to the - * given griduser, and whose SURLs are contained in the supplied array of - * Strings. - */ - public synchronized Collection findReduced( - String griduser, int[] surlUniqueIDs, String[] surls) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - /* - * NOTE: we search also on the fromSurl because otherwise we lost all - * request_get that have not the uniqueID set because are not yet been - * used by anybody - */ - // get reduced chunks - String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE rq.client_dn=? 
AND ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) - + " AND rg.sourceSURL IN " - + makeSurlString(surls) + " ) "; - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, griduser); - printWarnings(find.getWarnings()); - - log.trace("PtG CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtGChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedPtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method used in extraordinary situations to signal that data retrieved from - * the DB was malformed and could not be translated into the StoRM object - * model. - * - * This method attempts to change the status of the request to SRM_FAILURE and - * record it in the DB. - * - * This operation could potentially fail because the source of the malformed - * problems could be a problematic DB; indeed, initially only log messagges - * where recorded. - * - * Yet it soon became clear that the source of malformed data were the clients - * and/or FE recording info in the DB. In these circumstances the client would - * see its request as being in the SRM_IN_PROGRESS state for ever. Hence the - * pressing need to inform it of the encountered problems. 
- */ - public synchronized void signalMalformedPtGChunk(PtGChunkDataTO auxTO) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: signalMalformedPtGChunk - unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE status_Get SET statusCode=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE) - + ", explanation=? WHERE request_GetID=" + auxTO.primaryKey(); - PreparedStatement signal = null; - try { - signal = con.prepareStatement(signalSQL); - printWarnings(con.getWarnings()); - /* Prepared statement spares DB-specific String notation! */ - signal.setString(1, "Request is malformed!"); - printWarnings(signal.getWarnings()); - - log.trace("PTG CHUNK DAO: signalMalformed; {}", signal.toString()); - signal.executeUpdate(); - printWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("PtGChunkDAO! Unable to signal in DB that the request was " - + "malformed! Request: {}; Exception: {}", auxTO.toString(), e.toString()); - } finally { - close(signal); - } - } - - /** - * Method that returns the number of Get requests on the given SURL, that are - * in SRM_FILE_PINNED state. - * - * This method is intended to be used by PtGChunkCatalog in the - * isSRM_FILE_PINNED method invocation. - * - * In case of any error, 0 is returned. - */ - // request_Get table - public synchronized int numberInSRM_FILE_PINNED(int surlUniqueID) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: numberInSRM_FILE_PINNED - unable to get a valid connection!"); - return 0; - } - String str = "SELECT COUNT(rg.ID) " - + "FROM status_Get sg JOIN request_Get rg " - + "ON (sg.request_GetID=rg.ID) " - + "WHERE rg.sourceSURL_uniqueID=? AND sg.statusCode=?"; - PreparedStatement find = null; - ResultSet rs = null; - try { - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - /* Prepared statement spares DB-specific String notation! 
*/ - find.setInt(1, surlUniqueID); - printWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - printWarnings(find.getWarnings()); - - log.trace("PtG CHUNK DAO - numberInSRM_FILE_PINNED method: {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - int numberFilePinned = 0; - if (rs.next()) { - numberFilePinned = rs.getInt(1); - } - return numberFilePinned; - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to determine numberInSRM_FILE_PINNED! " - + "Returning 0! {}", e.getMessage(), e); - return 0; - } finally { - close(rs); - close(find); - } - } - - /** - * Method that updates all expired requests in SRM_FILE_PINNED state, into - * SRM_RELEASED. - * - * This is needed when the client forgets to invoke srmReleaseFiles(). - * - * @return - */ - public synchronized List transitExpiredSRM_FILE_PINNED() { - - // tring to the surl unique ID - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: transitExpiredSRM_FILE_PINNED - unable to get a valid connection!"); - return new ArrayList(); - } - HashMap expiredSurlMap = new HashMap(); - String str = null; - // Statement statement = null; - PreparedStatement preparedStatement = null; - - /* Find all expired surls */ - try { - // start transaction - con.setAutoCommit(false); - - str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID " - + "FROM request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "WHERE sg.statusCode=?" 
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - preparedStatement = con.prepareStatement(str); - preparedStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - - ResultSet res = preparedStatement.executeQuery(); - printWarnings(preparedStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rg.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("PtGChunkDAO! unable to build the TSURL from {}: " - + "InvalidTSURLAttributesException {}", sourceSURL, e.getMessage(), e); - } - } - expiredSurlMap.put(sourceSURL, uniqueID); - } - - if (expiredSurlMap.isEmpty()) { - commit(con); - log - .trace("PtGChunkDAO! No chunk of PtG request was transited from SRM_FILE_PINNED to SRM_RELEASED."); - return new ArrayList(); - } - } catch (SQLException e) { - log.error("PtGChunkDAO! SQLException. {}", e.getMessage(), e); - rollback(con); - return new ArrayList(); - } finally { - close(preparedStatement); - } - - /* Update status of all expired surls to SRM_RELEASED */ - - preparedStatement = null; - try { - - str = "UPDATE " - + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=? " - + "WHERE sg.statusCode=? 
AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - preparedStatement = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - preparedStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - printWarnings(preparedStatement.getWarnings()); - - preparedStatement.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - printWarnings(preparedStatement.getWarnings()); - - log.trace("PtG CHUNK DAO - transitExpiredSRM_FILE_PINNED method: {}", - preparedStatement.toString()); - - int count = preparedStatement.executeUpdate(); - printWarnings(preparedStatement.getWarnings()); - - if (count == 0) { - log.trace("PtGChunkDAO! No chunk of PtG request was " - + "transited from SRM_FILE_PINNED to SRM_RELEASED."); - } else { - log.info("PtGChunkDAO! {} chunks of PtG requests were transited from" - + " SRM_FILE_PINNED to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("PtGChunkDAO! Unable to transit expired SRM_FILE_PINNED chunks " - + "of PtG requests, to SRM_RELEASED! {}", e.getMessage(), e); - rollback(con); - return new ArrayList(); - } finally { - close(preparedStatement); - } - - /* - * in order to enhance performance here we can check if there is any file - * system with tape (T1D0, T1D1), if there is not any we can skip the - * following - */ - - /* Find all not expired surls from PtG and BoL */ - - HashSet pinnedSurlSet = new HashSet(); - try { - - // SURLs pinned by PtGs - str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID FROM " - + "request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "WHERE sg.statusCode=?" 
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; - - preparedStatement = con.prepareStatement(str); - preparedStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - - ResultSet res = preparedStatement.executeQuery(); - printWarnings(preparedStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rg.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("PtGChunkDAO! unable to build the TSURL from {}. " - + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage()); - } - } - pinnedSurlSet.add(uniqueID); - } - - close(preparedStatement); - - // SURLs pinned by BoLs - str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM " - + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "WHERE sb.statusCode=?" - + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; - - preparedStatement = con.prepareStatement(str); - preparedStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - - res = preparedStatement.executeQuery(); - printWarnings(preparedStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rb.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("PtGChunkDAO! unable to build the TSURL from {}. 
" - + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage(), e); - } - } - pinnedSurlSet.add(uniqueID); - } - commit(con); - } catch (SQLException e) { - log.error("PtGChunkDAO! SQLException. {}", e.getMessage(), e); - rollback(con); - } finally { - close(preparedStatement); - } - - ArrayList expiredSurlList = new ArrayList(); - /* Remove the Extended Attribute pinned if there is not a valid surl on it */ - TSURL surl; - for (Entry surlEntry : expiredSurlMap.entrySet()) { - if (!pinnedSurlSet.contains(surlEntry.getValue())) { - try { - surl = TSURL.makeFromStringValidate(surlEntry.getKey()); - } catch (InvalidTSURLAttributesException e) { - log.error("Invalid SURL, cannot release the pin " - + "(Extended Attribute): {}", surlEntry.getKey()); - continue; - } - expiredSurlList.add(surl); - StoRI stori; - try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl); - } catch (Throwable e) { - log.error("Invalid SURL {} cannot release the pin. {}: {}", - surlEntry.getKey(), e.getClass().getCanonicalName(), e.getMessage(), e); - continue; - } - - if (stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { - StormEA.removePinned(stori.getAbsolutePath()); - } - } - } - return expiredSurlList; - } - - /** - * Method that updates all chunks in SRM_FILE_PINNED state, into SRM_RELEASED. - * An array of long representing the primary key of each chunk is required: - * only they get the status changed provided their current status is - * SRM_FILE_PINNED. - * - * This method is used during srmReleaseFiles - * - * In case of any error nothing happens and no exception is thrown, but proper - * messagges get logged. - */ - public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: transitSRM_FILE_PINNEDtoSRM_RELEASED - unable to get a valid connection!"); - return; - } - String str = "UPDATE status_Get sg SET sg.statusCode=? " - + "WHERE sg.statusCode=? 
AND sg.request_GetID IN " + makeWhereString(ids); - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", - stmt.toString()); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PtG CHUNK DAO! No chunk of PtG request was " - + "transited from SRM_FILE_PINNED to SRM_RELEASED."); - } else { - log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited " - + "from SRM_FILE_PINNED to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to transit chunks" - + " from SRM_FILE_PINNED to SRM_RELEASED! {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * @param ids - * @param token - */ - public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids, - TRequestToken token) { - - if (token == null) { - transitSRM_FILE_PINNEDtoSRM_RELEASED(ids); - return; - } - - /* - * If a request token has been specified, only the related Get requests - * have to be released. This is done adding the r.r_token="..." clause in - * the where subquery. - */ - if (!checkConnection()) { - log.error("PTG CHUNK DAO: transitSRM_FILE_PINNEDtoSRM_RELEASED - " - + "unable to get a valid connection!"); - return; - } - - String str = "UPDATE " - + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=? " + "WHERE sg.statusCode=? 
AND rq.r_token='" - + token.toString() + "' AND rg.ID IN " + makeWhereString(ids); - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1,StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2,StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", stmt.toString()); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PtG CHUNK DAO! No chunk of PtG request was" - + " transited from SRM_FILE_PINNED to SRM_RELEASED."); - } else { - log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited from " - + "SRM_FILE_PINNED to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to transit chunks from " - + "SRM_FILE_PINNED to SRM_RELEASED! {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - public synchronized void updateStatus(TRequestToken requestToken, - int[] surlUniqueIDs, String[] surls, TStatusCode statusCode, - String explanation) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: updateStatus - unable to get a valid connection!"); - return; - } - String str = "UPDATE " - + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=? , sg.explanation=? " + "WHERE rq.r_token='" - + requestToken.toString() + "' AND ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rg.sourceSURL IN " - + makeSurlString(surls) + " ) "; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, StatusCodeConverter.getInstance().toDB(statusCode)); - printWarnings(stmt.getWarnings()); - - stmt.setString(2, (explanation != null ? 
explanation : "")); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - updateStatus: {}", stmt.toString()); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PtG CHUNK DAO! No chunk of PtG request was updated to {}.", - statusCode); - } else { - log.info("PtG CHUNK DAO! {} chunks of PtG requests were updated to {}.", - count, statusCode); - } - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to updated to {}! {}", statusCode, - e.getMessage(), e); - } finally { - close(stmt); - } - } - - public synchronized void updateStatusOnMatchingStatus(int[] surlsUniqueIDs, - String[] surls, TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surls == null || explanation == null - || surlsUniqueIDs.length == 0 || surls.length == 0 - || surlsUniqueIDs.length != surls.length) { - - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + " surls=" - + surls + " explanation=" + explanation); - } - - doUpdateStatusOnMatchingStatus(null, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, explanation, false, true, true); - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - doUpdateStatusOnMatchingStatus(requestToken, null, null, - expectedStatusCode, newStatusCode, explanation, true, false, true); - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, 
int[] surlsUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode) - throws IllegalArgumentException { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0 - || surls.length == 0 || surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken - + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls); - } - doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, null, true, true, false); - } - - public synchronized void doUpdateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation, boolean withRequestToken, boolean withSurls, - boolean withExplanation) throws IllegalArgumentException { - - if ((withRequestToken && requestToken == null) - || (withExplanation && explanation == null) - || (withSurls && (surlUniqueIDs == null || surls == null))) { - - throw new IllegalArgumentException( - "Unable to perform the doUpdateStatusOnMatchingStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withSurls=" + withSurls - + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls - + " withExplaination=" + withExplanation + " explanation=" - + explanation); - } - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!"); - return; - } - String str = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=? "; - if (withExplanation) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE sg.statusCode=? 
"; - if (withRequestToken) { - str += " AND " + buildTokenWhereClause(requestToken); - } - if (withSurls) { - str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); - } - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode)); - printWarnings(stmt.getWarnings()); - - stmt - .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode)); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString()); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PtG CHUNK DAO! No chunk of PtG request was updated " - + "from {} to {}.", expectedStatusCode, newStatusCode); - } else { - log.debug("PtG CHUNK DAO! {} chunks of PtG requests were updated " - + "from {} to {}.", count, expectedStatusCode, newStatusCode); - } - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to updated from {} to {}! {}", - expectedStatusCode, newStatusCode, e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("PTG CHUNK DAO! Unable to close ResultSet! Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("PTG CHUNK DAO! 
Unable to close Statement {} - Error: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - private void commit(Connection con) { - - if (con != null) { - try { - con.commit(); - con.setAutoCommit(true); - } catch (SQLException e) { - log.error("PtG, SQL Exception: {}", e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to roll back a failed transaction - */ - private void rollback(Connection con) { - - if (con != null) { - try { - con.rollback(); - con.setAutoCommit(true); - log.error("PTG CHUNK DAO: roll back successful!"); - } catch (SQLException e2) { - log.error("PTG CHUNK DAO: roll back failed! {}", e2.getMessage(), e2); - } - } - } - - /** - * Private method that returns the generated ID: it throws an exception in - * case of any problem! - */ - private int extractID(ResultSet rs) throws Exception { - - if (rs == null) { - throw new Exception("PTG CHUNK DAO! Null ResultSet!"); - } - if (rs.next()) { - return rs.getInt(1); - } else { - log.error("PTG CHUNK DAO! It was not possible to establish " - + "the assigned autoincrement primary key!"); - throw new Exception("PTG CHUNK DAO! It was not possible to" - + " establish the assigned autoincrement primary key!"); - } - } - - /** - * Method that returns a String containing all IDs. - */ - private String makeWhereString(long[] rowids) { - - StringBuilder sb = new StringBuilder("("); - int n = rowids.length; - for (int i = 0; i < n; i++) { - sb.append(rowids[i]); - if (i < (n - 1)) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surl's IDs. - */ - private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { - - StringBuilder sb = new StringBuilder("("); - for (int i = 0; i < surlUniqueIDs.length; i++) { - if (i > 0) { - sb.append(","); - } - sb.append(surlUniqueIDs[i]); - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surls. 
- */ - private String makeSurlString(String[] surls) { - - StringBuilder sb = new StringBuilder("("); - int n = surls.length; - - for (int i = 0; i < n; i++) { - - SURL requestedSURL; - - try { - requestedSURL = SURL.makeSURLfromString(surls[i]); - } catch (NamespaceException e) { - log.error(e.getMessage()); - log.debug("Skip '{}' during query creation", surls[i]); - continue; - } - - sb.append("'"); - sb.append(requestedSURL.getNormalFormAsString()); - sb.append("','"); - sb.append(requestedSURL.getQueryFormAsString()); - sb.append("'"); - - if (i < (n - 1)) { - sb.append(","); - } - } - - sb.append(")"); - return sb.toString(); - } - - /** - * Auxiliary method that sets up the connection to the DB, as well as the - * prepared statement. - */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - printWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (ClassNotFoundException | SQLException e) { - log.error("PTG CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("PTG CHUNK DAO! Reconnecting to DB! "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that tales down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("PTG CHUNK DAO! 
Exception in takeDownConnection method: {}", - e.getMessage(), e); - } - } - } - - public Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0 || dn == null) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " dn=" + dn); - } - return find(surlsUniqueIDs, surlsArray, dn, true); - } - - public Collection find(int[] surlsUniqueIDs, - String[] surlsArray) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray); - } - return find(surlsUniqueIDs, surlsArray, null, false); - } - - private synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn, boolean withDn) - throws IllegalArgumentException { - - if ((withDn && dn == null) || surlsUniqueIDs == null - || surlsUniqueIDs.length == 0 || surlsArray == null - || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); - } - if (!checkConnection()) { - log.error("PTG CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - - try { - - String str = "SELECT rq.ID, rq.r_token, sg.statusCode, rq.pinLifetime, rg.ID, rq.timeStamp, " - + "rq.client_dn, rq.proxy, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID, " - + "d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Get 
rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "LEFT JOIN request_DirOption d ON rg.request_DirOptionID=d.ID " - + "WHERE ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rg.sourceSURL IN " - + makeSurlString(surlsArray) + " )"; - - if (withDn) { - - str += " AND rq.client_dn=\'" + dn + "\'"; - } - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List list = new ArrayList(); - - log.trace("PTG CHUNK DAO - find method: {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - PtGChunkDataTO chunkDataTO = null; - while (rs.next()) { - - chunkDataTO = new PtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separeted by the "#" char. The proxy is a BLOB, hence it has to be - * properly conveted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); - chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); - chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - private String buildExpainationSet(String explanation) { - - return " sg.explanation='" + explanation + "' "; - } - - private String buildTokenWhereClause(TRequestToken requestToken) { - - return " rq.r_token='" + requestToken.toString() + "' "; - } - - private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { - - return " ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rg.sourceSURL IN " - + makeSurlString(surls) + " ) "; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/PtGChunkDataTO.java deleted file mode 100644 index 7baf7ee18..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtGChunkDataTO.java +++ /dev/null @@ -1,311 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.common.types.TURLPrefix; -import java.sql.Timestamp; -import java.util.List; -import it.grid.storm.namespace.model.Protocol; - -/** - * Class that represents a row in the Persistence Layer: this is all raw data - * referring to the PtGChunkData proper, that is, String and primitive types. - * - * Each field is initialized with default values as per SRM 2.2 specification: - * protocolList GSIFTP dirOption false status SRM_REQUEST_QUEUED - * - * All other fields are 0 if int, or a white space if String. - * - * @author EGRID ICTP - * @version 3.0 - * @date June 2005 - */ -public class PtGChunkDataTO { - - private static final String FQAN_SEPARATOR = "#"; - /* Database table request_Get fields BEGIN */ - private long primaryKey = -1; // ID primary key of record in DB - private boolean dirOption; // initialised in constructor - private String fromSURL = " "; - private String normalizedStFN = null; - private Integer surlUniqueID = null; - /* Database table request_Get fields END */ - - private String requestToken = " "; - private int lifetime = 0; - private boolean allLevelRecursive; // initialised in constructor - private int numLevel; // initialised in constructor - private List protocolList = null; // initialised in constructor - private long filesize = 0; - private int status; // initialised in constructor - private String errString = " "; - private String turl = " "; - private Timestamp timeStamp; - private String clientDN = null; - private String vomsAttributes = null; - - public PtGChunkDataTO() { - - TURLPrefix protocolPreferences = new TURLPrefix(); - protocolPreferences.addProtocol(Protocol.GSIFTP); - this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); - this.status = StatusCodeConverter.getInstance().toDB( - 
TStatusCode.SRM_REQUEST_QUEUED); - this.dirOption = false; - // - this.allLevelRecursive = false; - this.numLevel = 0; - } - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public String requestToken() { - - return requestToken; - } - - public void setRequestToken(String s) { - - requestToken = s; - } - - public Timestamp timeStamp() { - - return timeStamp; - } - - public void setTimeStamp(Timestamp timeStamp) { - - this.timeStamp = timeStamp; - } - - public String fromSURL() { - - return fromSURL; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param sURLUniqueID - * the sURLUniqueID to set - */ - public void setSurlUniqueID(Integer sURLUniqueID) { - - this.surlUniqueID = sURLUniqueID; - } - - /** - * @return the sURLUniqueID - */ - public Integer surlUniqueID() { - - return surlUniqueID; - } - - public int lifeTime() { - - return lifetime; - } - - public void setLifeTime(int n) { - - lifetime = n; - } - - public boolean dirOption() { - - return dirOption; - } - - public void setDirOption(boolean b) { - - dirOption = b; - } - - public boolean allLevelRecursive() { - - return allLevelRecursive; - } - - public void setAllLevelRecursive(boolean b) { - - allLevelRecursive = b; - } - - public int numLevel() { - - return numLevel; - } - - public void setNumLevel(int n) { - - numLevel = n; - } - - public List protocolList() { - - return protocolList; - } - - public void setProtocolList(List l) { - - if ((l != null) && (!l.isEmpty())) - protocolList = l; - } - - public long fileSize() { - - return filesize; - } - - public void setFileSize(long n) { - - filesize = n; - } - - public int 
status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String turl() { - - return turl; - } - - public void setTurl(String s) { - - turl = s; - } - - public String clientDN() { - - return clientDN; - } - - public void setClientDN(String s) { - - clientDN = s; - } - - public String vomsAttributes() { - - return vomsAttributes; - } - - public void setVomsAttributes(String s) { - - vomsAttributes = s; - } - - public void setVomsAttributes(String[] fqaNsAsString) { - - vomsAttributes = ""; - for (int i = 0; i < fqaNsAsString.length; i++) { - vomsAttributes += fqaNsAsString[i]; - if (i < fqaNsAsString.length - 1) { - vomsAttributes += FQAN_SEPARATOR; - } - } - - } - - public String[] vomsAttributesArray() { - - return vomsAttributes.split(FQAN_SEPARATOR); - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(requestToken); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(lifetime); - sb.append(" "); - sb.append(dirOption); - sb.append(" "); - sb.append(allLevelRecursive); - sb.append(" "); - sb.append(numLevel); - sb.append(" "); - sb.append(protocolList); - sb.append(" "); - sb.append(filesize); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - sb.append(turl); - return sb.toString(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/PtGData.java b/src/main/java/it/grid/storm/catalogs/PtGData.java deleted file mode 100644 index 0ef728428..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtGData.java +++ /dev/null @@ -1,38 +0,0 @@ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TDirOption; -import 
it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TSizeInBytes; - -public interface PtGData extends FileTransferData { - - /** - * Method that returns the requested pin life time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds getPinLifeTime(); - - /** - * Method that returns the dirOption specified in the srm request. - */ - public TDirOption getDirOption(); - - /** - * Method that returns the file size for this chunk of the srm request. - */ - public TSizeInBytes getFileSize(); - - /** - * Method used to set the size of the file corresponding to the requested - * SURL. If the supplied TSizeInByte is null, then nothing gets set! - */ - public void setFileSize(TSizeInBytes size); - - /** - * Method that sets the status of this request to SRM_FILE_PINNED; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_FILE_PINNED(String explanation); - -} diff --git a/src/main/java/it/grid/storm/catalogs/PtGPersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/PtGPersistentChunkData.java deleted file mode 100644 index a363e36c2..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtGPersistentChunkData.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.srm.types.TTURL; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a PrepareToGetChunkData, that is part of a multifile - * PrepareToGet srm request. It contains data about: the requestToken, the - * fromSURL, the requested lifeTime of pinning, the TDirOption which tells - * whether the requested SURL is a directory and if it must be recursed at all - * levels, as well as the desired number of levels to recurse, the desired - * transferProtocols in order of preference, the fileSize, and the transferURL - * for the supplied SURL. 
- * - * @author EGRID - ICTP Trieste - * @date March 21st, 2005 - * @version 3.0 - */ -public class PtGPersistentChunkData extends IdentityPtGData implements - PersistentChunkData { - - private static final Logger log = LoggerFactory - .getLogger(PtGPersistentChunkData.class); - - /** - * long representing the primary key for the persistence layer, in the - * status_Get table - */ - private long primaryKey = -1; - - /** - * This is the requestToken of the multifile srm request to which this chunk - * belongs - */ - private TRequestToken requestToken; - - /** - * @param requestToken - * @param fromSURL - * @param lifeTime - * @param dirOption - * @param desiredProtocols - * @param fileSize - * @param status - * @param transferURL - * @throws InvalidPtGDataAttributesException - */ - public PtGPersistentChunkData(GridUserInterface auth, - TRequestToken requestToken, TSURL fromSURL, TLifeTimeInSeconds lifeTime, - TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize, - TReturnStatus status, TTURL transferURL) - throws InvalidPtGDataAttributesException, - InvalidPtGDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(auth, fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, - status, transferURL); - if (requestToken == null) { - log.debug("PtGPersistentChunkData: requestToken is null!"); - throw new InvalidPtGPersistentChunkDataAttributesException(requestToken, - fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, - transferURL); - } - - this.requestToken = requestToken; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - @Override - public long getPrimaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! 
- */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the requestToken of the srm request to which this chunk - * belongs. - */ - @Override - public TRequestToken getRequestToken() { - - return requestToken; - } - - /** - * Method that sets the status of this request to SRM_FILE_PINNED; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_FILE_PINNED(String explanation) { - - setStatus(TStatusCode.SRM_FILE_PINNED, explanation); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = super.hashCode(); - result = prime * result + (int) (primaryKey ^ (primaryKey >>> 32)); - result = prime * result - + ((requestToken == null) ? 0 : requestToken.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - PtGPersistentChunkData other = (PtGPersistentChunkData) obj; - if (primaryKey != other.primaryKey) { - return false; - } - if (requestToken == null) { - if (other.requestToken != null) { - return false; - } - } else if (!requestToken.equals(other.requestToken)) { - return false; - } - return true; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("PtGPersistentChunkData [primaryKey="); - builder.append(primaryKey); - builder.append(", requestToken="); - builder.append(requestToken); - builder.append(", pinLifeTime="); - builder.append(pinLifeTime); - builder.append(", dirOption="); - builder.append(dirOption); - 
builder.append(", fileSize="); - builder.append(fileSize); - builder.append(", transferProtocols="); - builder.append(transferProtocols); - builder.append(", SURL="); - builder.append(SURL); - builder.append(", status="); - builder.append(status); - builder.append(", transferURL="); - builder.append(transferURL); - builder.append("]"); - return builder.toString(); - } - - @Override - public long getIdentifier() { - - return getPrimaryKey(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java index 482f3e15f..25a28a0b0 100644 --- a/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java @@ -17,6 +17,15 @@ package it.grid.storm.catalogs; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + import it.grid.storm.common.types.SizeUnit; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.common.types.TimeUnit; @@ -24,6 +33,26 @@ import it.grid.storm.griduser.AbstractGridUser; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; +import it.grid.storm.persistence.converter.FileLifetimeConverter; +import it.grid.storm.persistence.converter.FileStorageTypeConverter; +import it.grid.storm.persistence.converter.OverwriteModeConverter; +import it.grid.storm.persistence.converter.PinLifetimeConverter; +import it.grid.storm.persistence.converter.SizeInBytesIntConverter; +import it.grid.storm.persistence.converter.SpaceTokenStringConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TURLConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.persistence.dao.PtPChunkDAO; +import 
it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPPersistentChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidReducedPtPChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.impl.mysql.PtPChunkDAOMySql; +import it.grid.storm.persistence.model.PtPChunkDataTO; +import it.grid.storm.persistence.model.PtPPersistentChunkData; +import it.grid.storm.persistence.model.ReducedPtPChunkData; +import it.grid.storm.persistence.model.ReducedPtPChunkDataTO; import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.InvalidTSizeAttributesException; @@ -39,20 +68,11 @@ import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.srm.types.TTURL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * Class that represents StoRMs PtPChunkCatalog: it collects PtPChunkData and - * provides methods for looking up a PtPChunkData based on TRequestToken, as - * well as for updating data into persistence. Methods are also supplied to - * evaluate if a SURL is in SRM_SPACE_AVAILABLE state, and to transit expired - * SURLs in SRM_SPACE_AVAILABLE state to SRM_FILE_LIFETIME_EXPIRED. + * Class that represents StoRMs PtPChunkCatalog: it collects PtPChunkData and provides methods for + * looking up a PtPChunkData based on TRequestToken, as well as for updating data into persistence. + * Methods are also supplied to evaluate if a SURL is in SRM_SPACE_AVAILABLE state, and to transit + * expired SURLs in SRM_SPACE_AVAILABLE state to SRM_FILE_LIFETIME_EXPIRED. 
* * @author EGRID - ICTP Trieste * @date June, 2005 @@ -60,554 +80,412 @@ */ public class PtPChunkCatalog { - private static final Logger log = LoggerFactory - .getLogger(PtPChunkCatalog.class); - - /* only instance of PtPChunkCatalog present in StoRM! */ - private static final PtPChunkCatalog cat = new PtPChunkCatalog(); - private final PtPChunkDAO dao = PtPChunkDAO.getInstance(); - - private PtPChunkCatalog() {} - - /** - * Method that returns the only instance of PtPChunkCatalog available. - */ - public static PtPChunkCatalog getInstance() { - - return cat; - } - - /** - * Method used to update into Persistence a retrieved PtPChunkData. - */ - synchronized public void update(PtPPersistentChunkData chunkData) { - - PtPChunkDataTO to = new PtPChunkDataTO(); - /* rimary key needed by DAO Object */ - to.setPrimaryKey(chunkData.getPrimaryKey()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setTransferURL(TURLConverter.getInstance().toDB( - chunkData.getTransferURL().toString())); - to.setPinLifetime(PinLifetimeConverter.getInstance().toDB( - chunkData.pinLifetime().value())); - to.setFileLifetime(FileLifetimeConverter.getInstance().toDB( - chunkData.fileLifetime().value())); - to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB( - chunkData.fileStorageType())); - to.setOverwriteOption(OverwriteModeConverter.getInstance().toDB( - chunkData.overwriteOption())); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - to.setClientDN(chunkData.getUser().getDn()); - if (chunkData.getUser() instanceof AbstractGridUser) { - if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { - to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()) - .getFQANsAsString()); - } - - } - dao.update(to); - } - - /** - * Method that returns a Collection of PtPChunkData Objects 
matching the - * supplied TRequestToken. If any of the data associated to the TRequestToken - * is not well formed and so does not allow a PtPChunkData Object to be - * created, then that part of the request is dropped, gets logged and an - * attempt is made to write in the DB that the chunk was malformed; the - * processing continues with the next part. Only the valid chunks get - * returned. If there are no chunks to process then an empty Collection is - * returned, and a messagge gets logged. NOTE! Chunks in SRM_ABORTED status - * are NOT returned! This is imporant because this method is intended to be - * used by the Feeders to fetch all chunks in the request, and aborted chunks - * should not be picked up for processing! - */ - synchronized public Collection lookup( - final TRequestToken rt) { - - Collection chunkTOs = dao.find(rt); - log.debug("PtPChunkCatalog: retrieved data {}", chunkTOs); - return buildChunkDataList(chunkTOs); - } - - /** - * Private method used to create a PtPChunkData object, from a PtPChunkDataTO - * and TRequestToken. If a chunk cannot be created, an error messagge gets - * logged and an attempt is made to signal in the DB that the chunk is - * malformed. 
- */ - private PtPPersistentChunkData makeOne(PtPChunkDataTO auxTO, TRequestToken rt) { - - StringBuilder errorSb = new StringBuilder(); - // toSURL - TSURL toSURL = null; - try { - toSURL = TSURL.makeFromStringValidate(auxTO.toSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (auxTO.normalizedStFN() != null) { - toSURL.setNormalizedStFN(auxTO.normalizedStFN()); - } - if (auxTO.surlUniqueID() != null) { - toSURL.setUniqueID(auxTO.surlUniqueID().intValue()); - } - // pinLifetime - TLifeTimeInSeconds pinLifetime = null; - try { - long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM( - auxTO.pinLifetime()); - // Check for max value allowed - long max = Configuration.getInstance().getPinLifetimeMaximum(); - if (pinLifeTime > max) { - log.warn("PinLifeTime is greater than the max value allowed. Drop the " - + "value to the max = {} seconds", max); - pinLifeTime = max; - } - pinLifetime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // fileLifetime - TLifeTimeInSeconds fileLifetime = null; - try { - fileLifetime = TLifeTimeInSeconds.make(FileLifetimeConverter - .getInstance().toStoRM(auxTO.fileLifetime()), TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // fileStorageType - TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance() - .toSTORM(auxTO.fileStorageType()); - if (fileStorageType == TFileStorageType.EMPTY) { - errorSb.append("\nTFileStorageType could not be translated from " - + "its String representation! String: " + auxTO.fileStorageType()); - // Use the default value defined in Configuration. 
- fileStorageType = TFileStorageType.getTFileStorageType(Configuration - .getInstance().getDefaultFileStorageType()); - errorSb.append("\nUsed the default TFileStorageType as defined " - + "in StoRM config.: " + fileStorageType); - } - // expectedFileSize - // - // WARNING! A converter is used because the DB uses 0 for empty, whereas - // StoRM object model does allow a 0 size! Since this is an optional - // field - // in the SRM specs, null must be converted explicitly to Empty - // TSizeInBytes - // because it is indeed well formed! - TSizeInBytes expectedFileSize = null; - TSizeInBytes emptySize = TSizeInBytes.makeEmpty(); - long sizeTranslation = SizeInBytesIntConverter.getInstance().toStoRM( - auxTO.expectedFileSize()); - if (emptySize.value() == sizeTranslation) { - expectedFileSize = emptySize; - } else { - try { - expectedFileSize = TSizeInBytes.make(auxTO.expectedFileSize(), - SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - } - // spaceToken! - // - // WARNING! A converter is still needed because of DB logic for missing - // SpaceToken makes use of NULL, whereas StoRM object model does not - // allow - // for null! It makes use of a specific Empty type. - // - // Indeed, the SpaceToken field is optional, so a request with a null - // value - // for the SpaceToken field in the DB, _is_ well formed! - TSpaceToken spaceToken = null; - TSpaceToken emptyToken = TSpaceToken.makeEmpty(); - /** - * convert empty string representation of DPM into StoRM representation; - */ - String spaceTokenTranslation = SpaceTokenStringConverter.getInstance() - .toStoRM(auxTO.spaceToken()); - if (emptyToken.toString().equals(spaceTokenTranslation)) { - spaceToken = emptyToken; - } else { - try { - spaceToken = TSpaceToken.make(spaceTokenTranslation); - } catch (InvalidTSpaceTokenAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - } - // overwriteOption! 
- TOverwriteMode overwriteOption = OverwriteModeConverter.getInstance() - .toSTORM(auxTO.overwriteOption()); - if (overwriteOption == TOverwriteMode.EMPTY) { - errorSb.append("\nTOverwriteMode could not be translated " - + "from its String representation! String: " + auxTO.overwriteOption()); - overwriteOption = null; - } - // transferProtocols - TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO - .protocolList()); - if (transferProtocols.size() == 0) { - errorSb.append("\nEmpty list of TransferProtocols " - + "or could not translate TransferProtocols!"); - transferProtocols = null; // fail construction of PtPChunkData! - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance() - .toSTORM(auxTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + auxTO.status()); - } else { - status = new TReturnStatus(code, auxTO.errString()); - } - GridUserInterface gridUser = null; - try { - if (auxTO.vomsAttributes() != null - && !auxTO.vomsAttributes().trim().equals("")) { - gridUser = GridUserManager.makeVOMSGridUser(auxTO.clientDN(), - auxTO.vomsAttributesArray()); - } else { - gridUser = GridUserManager.makeGridUser(auxTO.clientDN()); - } - - } catch (IllegalArgumentException e) { - log.error("Unexpected error on voms grid user creation. " - + "IllegalArgumentException: {}", e.getMessage(), e); - } - - // transferURL - /** - * whatever is read is just meaningless because PtP will fill it in!!! So - * create an Empty TTURL by default! Vital to avoid problems with unknown - * DPM NULL/EMPTY logic policy! 
- */ - TTURL transferURL = TTURL.makeEmpty(); - // make PtPChunkData - PtPPersistentChunkData aux = null; - try { - aux = new PtPPersistentChunkData(gridUser, rt, toSURL, pinLifetime, - fileLifetime, fileStorageType, spaceToken, expectedFileSize, - transferProtocols, overwriteOption, status, transferURL); - aux.setPrimaryKey(auxTO.primaryKey()); - } catch (InvalidPtPPersistentChunkDataAttributesException e) { - dao.signalMalformedPtPChunk(auxTO); - log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" - + " from persistence. Dropping chunk from request: {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } catch (InvalidPtPDataAttributesException e) { - dao.signalMalformedPtPChunk(auxTO); - log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" - + " from persistence. Dropping chunk from request: {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } catch (InvalidFileTransferDataAttributesException e) { - dao.signalMalformedPtPChunk(auxTO); - log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" - + " from persistence. Dropping chunk from request: {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } catch (InvalidSurlRequestDataAttributesException e) { - dao.signalMalformedPtPChunk(auxTO); - log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" - + " from persistence. Dropping chunk from request: {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... 
- return aux; - } - - /** - * - * Adds to the received PtPChunkDataTO the normalized StFN and the SURL unique - * ID taken from the PtPChunkData - * - * @param chunkTO - * @param chunk - */ - private void completeTO(ReducedPtPChunkDataTO chunkTO, - final ReducedPtPChunkData chunk) { - - chunkTO.setNormalizedStFN(chunk.toSURL().normalizedStFN()); - chunkTO.setSurlUniqueID(new Integer(chunk.toSURL().uniqueId())); - } - - /** - * - * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and - * completes it with the normalized StFN and the SURL unique ID taken from the - * PtGChunkData - * - * @param chunkTO - * @param chunk - * @return - * @throws InvalidReducedPtPChunkDataAttributesException - */ - private ReducedPtPChunkDataTO completeTO(PtPChunkDataTO chunkTO, - final PtPPersistentChunkData chunk) - throws InvalidReducedPtPChunkDataAttributesException { - - ReducedPtPChunkDataTO reducedChunkTO = this.reduce(chunkTO); - this.completeTO(reducedChunkTO, this.reduce(chunk)); - return reducedChunkTO; - } - - /** - * Creates a ReducedPtPChunkData from the data contained in the received - * PtPChunkData - * - * @param chunk - * @return - * @throws InvalidReducedPtPChunkDataAttributesException - */ - private ReducedPtPChunkData reduce(PtPPersistentChunkData chunk) - throws InvalidReducedPtPChunkDataAttributesException { - - ReducedPtPChunkData reducedChunk = new ReducedPtPChunkData(chunk.getSURL(), - chunk.getStatus(), chunk.fileStorageType(), chunk.fileLifetime()); - reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); - return reducedChunk; - } - - /** - * Creates a ReducedPtPChunkDataTO from the data contained in the received - * PtPChunkDataTO - * - * @param chunkTO - * @return - */ - private ReducedPtPChunkDataTO reduce(PtPChunkDataTO chunkTO) { - - ReducedPtPChunkDataTO reducedChunkTO = new ReducedPtPChunkDataTO(); - reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); - reducedChunkTO.setToSURL(chunkTO.toSURL()); - 
reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); - reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID()); - reducedChunkTO.setStatus(chunkTO.status()); - reducedChunkTO.setErrString(chunkTO.errString()); - return reducedChunkTO; - } - - /** - * Checks if the received PtPChunkDataTO contains the fields not set by the - * front end but required - * - * @param chunkTO - * @return - */ - private boolean isComplete(PtPChunkDataTO chunkTO) { - - return (chunkTO.normalizedStFN() != null) - && (chunkTO.surlUniqueID() != null); - } - - /** - * Checks if the received ReducedPtGChunkDataTO contains the fields not set by - * the front end but required - * - * @param reducedChunkTO - * @return - */ - private boolean isComplete(ReducedPtPChunkDataTO reducedChunkTO) { - - return (reducedChunkTO.normalizedStFN() != null) - && (reducedChunkTO.surlUniqueID() != null); - } - - public Collection lookupReducedPtPChunkData( - TRequestToken requestToken, Collection surls) { - - Collection reducedChunkDataTOs = dao.findReduced( - requestToken.getValue(), surls); - log.debug("PtP CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs); - return buildReducedChunkDataList(reducedChunkDataTOs); - } - - public Collection lookupPtPChunkData(TSURL surl, - GridUserInterface user) { - - return lookupPtPChunkData( - (List) Arrays.asList(new TSURL[] { surl }), user); - } - - private Collection lookupPtPChunkData( - List surls, GridUserInterface user) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOs = dao.find(surlsUniqueIDs, - surlsArray, user.getDn()); - log.debug("PtP CHUNK CATALOG: retrieved data {}", chunkDataTOs); - return buildChunkDataList(chunkDataTOs); - } - - private Collection buildChunkDataList( - Collection chunkDataTOs) { - - ArrayList list = new 
ArrayList(); - PtPPersistentChunkData chunk; - for (PtPChunkDataTO chunkTO : chunkDataTOs) { - chunk = makeOne(chunkTO); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(completeTO(chunkTO, chunk)); - } catch (InvalidReducedPtPChunkDataAttributesException e) { - log.warn("PtG CHUNK CATALOG! unable to add missing informations on " - + "DB to the request: {}", e.getMessage()); - } - } - log.debug("PtPChunkCatalog: returning {}\n\n", list); - return list; - } - - private PtPPersistentChunkData makeOne(PtPChunkDataTO chunkTO) { - - try { - return makeOne(chunkTO, - new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp())); - } catch (InvalidTRequestTokenAttributesException e) { - throw new IllegalStateException( - "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " - + e); - } - } - - private Collection buildReducedChunkDataList( - Collection chunkDataTOCollection) { - - ArrayList list = new ArrayList(); - ReducedPtPChunkData reducedChunkData; - for (ReducedPtPChunkDataTO reducedChunkDataTO : chunkDataTOCollection) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!this.isComplete(reducedChunkDataTO)) { - this.completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("PtP CHUNK CATALOG: returning {}", list); - return list; - } - - private ReducedPtPChunkData makeOneReduced( - ReducedPtPChunkDataTO reducedChunkDataTO) { - - StringBuilder errorSb = new StringBuilder(); - // fromSURL - TSURL toSURL = null; - try { - toSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.toSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (reducedChunkDataTO.normalizedStFN() != null) { - toSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN()); - } - if (reducedChunkDataTO.surlUniqueID() != null) { - 
toSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue()); - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - reducedChunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + reducedChunkDataTO.status()); - } else { - status = new TReturnStatus(code, reducedChunkDataTO.errString()); - } - // fileStorageType - TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance() - .toSTORM(reducedChunkDataTO.fileStorageType()); - if (fileStorageType == TFileStorageType.EMPTY) { - errorSb.append("\nTFileStorageType could not be " - + "translated from its String representation! String: " - + reducedChunkDataTO.fileStorageType()); - // Use the default value defined in Configuration. - fileStorageType = TFileStorageType.getTFileStorageType(Configuration - .getInstance().getDefaultFileStorageType()); - errorSb - .append("\nUsed the default TFileStorageType as defined in StoRM config.: " - + fileStorageType); - } - // fileLifetime - TLifeTimeInSeconds fileLifetime = null; - try { - fileLifetime = TLifeTimeInSeconds.make(FileLifetimeConverter - .getInstance().toStoRM(reducedChunkDataTO.fileLifetime()), - TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // make ReducedPtPChunkData - ReducedPtPChunkData aux = null; - try { - aux = new ReducedPtPChunkData(toSURL, status, fileStorageType, - fileLifetime); - aux.setPrimaryKey(reducedChunkDataTO.primaryKey()); - } catch (InvalidReducedPtPChunkDataAttributesException e) { - log.warn("PtP CHUNK CATALOG! Retrieved malformed Reduced PtP" - + " chunk data from persistence: dropping reduced chunk..."); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... 
- return aux; - } - - public int updateStatus(TRequestToken requestToken, TSURL surl, - TStatusCode statusCode, String explanation) { - - return dao.updateStatus(requestToken, new int[] { surl.uniqueId() }, - new String[] { surl.rawSurl() }, statusCode, explanation); - } - - public int updateFromPreviousStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - return dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, - newStatusCode, explanation); - } - - public int updateFromPreviousStatus(TRequestToken requestToken, - List surlList, TStatusCode expectedStatusCode, - TStatusCode newStatusCode) { - - int[] surlsUniqueIDs = new int[surlList.size()]; - String[] surls = new String[surlList.size()]; - int index = 0; - for (TSURL tsurl : surlList) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - return dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode); - } + private static final Logger log = LoggerFactory.getLogger(PtPChunkCatalog.class); + + private static PtPChunkCatalog instance; + + public static synchronized PtPChunkCatalog getInstance() { + if (instance == null) { + instance = new PtPChunkCatalog(); + } + return instance; + } + + private final PtPChunkDAO dao; + + private PtPChunkCatalog() { + dao = PtPChunkDAOMySql.getInstance(); + } + + /** + * Method used to update into Persistence a retrieved PtPChunkData. 
+ */ + public synchronized void update(PtPPersistentChunkData chunkData) { + + PtPChunkDataTO to = new PtPChunkDataTO(); + /* Primary key needed by DAO Object */ + to.setPrimaryKey(chunkData.getPrimaryKey()); + to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode())); + to.setErrString(chunkData.getStatus().getExplanation()); + to.setTransferURL(TURLConverter.getInstance().toDB(chunkData.getTransferURL().toString())); + to.setPinLifetime(PinLifetimeConverter.getInstance().toDB(chunkData.pinLifetime().value())); + to.setFileLifetime(FileLifetimeConverter.getInstance().toDB(chunkData.fileLifetime().value())); + to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB(chunkData.fileStorageType())); + to.setOverwriteOption(OverwriteModeConverter.toDB(chunkData.overwriteOption()).name()); + to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); + to.setSurlUniqueID(Integer.valueOf(chunkData.getSURL().uniqueId())); + to.setClientDN(chunkData.getUser().getDn()); + if (chunkData.getUser() instanceof AbstractGridUser) { + if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { + to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString()); + } + + } + dao.update(to); + } + + /** + * Method that returns a Collection of PtPChunkData Objects matching the supplied TRequestToken. + * If any of the data associated to the TRequestToken is not well formed and so does not allow a + * PtPChunkData Object to be created, then that part of the request is dropped, gets logged and an + * attempt is made to write in the DB that the chunk was malformed; the processing continues with + * the next part. Only the valid chunks get returned. If there are no chunks to process then an + * empty Collection is returned, and a message gets logged. NOTE! Chunks in SRM_ABORTED status are + * NOT returned! 
This is important because this method is intended to be used by the Feeders to + * fetch all chunks in the request, and aborted chunks should not be picked up for processing! + */ + public synchronized Collection lookup(final TRequestToken rt) { + + Collection chunkTOs = dao.find(rt); + log.debug("PtPChunkCatalog: retrieved data {}", chunkTOs); + return buildChunkDataList(chunkTOs); + } + + /** + * Private method used to create a PtPChunkData object, from a PtPChunkDataTO and TRequestToken. + * If a chunk cannot be created, an error messagge gets logged and an attempt is made to signal in + * the DB that the chunk is malformed. + */ + private PtPPersistentChunkData makeOne(PtPChunkDataTO auxTO, TRequestToken rt) { + + StringBuilder errorSb = new StringBuilder(); + // toSURL + TSURL toSURL = null; + try { + toSURL = TSURL.makeFromStringValidate(auxTO.toSURL()); + } catch (InvalidTSURLAttributesException e) { + errorSb.append(e); + } + if (auxTO.normalizedStFN() != null) { + toSURL.setNormalizedStFN(auxTO.normalizedStFN()); + } + if (auxTO.surlUniqueID() != null) { + toSURL.setUniqueID(auxTO.surlUniqueID().intValue()); + } + // pinLifetime + TLifeTimeInSeconds pinLifetime = null; + try { + long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(auxTO.pinLifetime()); + // Check for max value allowed + long max = Configuration.getInstance().getPinLifetimeMaximum(); + if (pinLifeTime > max) { + log.warn("PinLifeTime is greater than the max value allowed. 
Drop the " + + "value to the max = {} seconds", max); + pinLifeTime = max; + } + pinLifetime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // fileLifetime + TLifeTimeInSeconds fileLifetime = null; + try { + fileLifetime = TLifeTimeInSeconds + .make(FileLifetimeConverter.getInstance().toStoRM(auxTO.fileLifetime()), TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // fileStorageType + TFileStorageType fileStorageType = + FileStorageTypeConverter.getInstance().toSTORM(auxTO.fileStorageType()); + if (fileStorageType == TFileStorageType.EMPTY) { + errorSb.append("\nTFileStorageType could not be translated from " + + "its String representation! String: " + auxTO.fileStorageType()); + // Use the default value defined in Configuration. + fileStorageType = TFileStorageType + .getTFileStorageType(Configuration.getInstance().getDefaultFileStorageType()); + errorSb.append("\nUsed the default TFileStorageType as defined " + "in StoRM config.: " + + fileStorageType); + } + // expectedFileSize + // + // WARNING! A converter is used because the DB uses 0 for empty, whereas + // StoRM object model does allow a 0 size! Since this is an optional + // field + // in the SRM specs, null must be converted explicitly to Empty + // TSizeInBytes + // because it is indeed well formed! + TSizeInBytes expectedFileSize = null; + TSizeInBytes emptySize = TSizeInBytes.makeEmpty(); + long sizeTranslation = SizeInBytesIntConverter.getInstance().toStoRM(auxTO.expectedFileSize()); + if (emptySize.value() == sizeTranslation) { + expectedFileSize = emptySize; + } else { + try { + expectedFileSize = TSizeInBytes.make(auxTO.expectedFileSize(), SizeUnit.BYTES); + } catch (InvalidTSizeAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + } + // spaceToken! + // + // WARNING! 
A converter is still needed because of DB logic for missing + // SpaceToken makes use of NULL, whereas StoRM object model does not + // allow + // for null! It makes use of a specific Empty type. + // + // Indeed, the SpaceToken field is optional, so a request with a null + // value + // for the SpaceToken field in the DB, _is_ well formed! + TSpaceToken spaceToken = null; + TSpaceToken emptyToken = TSpaceToken.makeEmpty(); + /** + * convert empty string representation of DPM into StoRM representation; + */ + String spaceTokenTranslation = + SpaceTokenStringConverter.getInstance().toStoRM(auxTO.spaceToken()); + if (emptyToken.toString().equals(spaceTokenTranslation)) { + spaceToken = emptyToken; + } else { + try { + spaceToken = TSpaceToken.make(spaceTokenTranslation); + } catch (InvalidTSpaceTokenAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + } + // overwriteOption! + TOverwriteMode overwriteOption = + OverwriteModeConverter.toSTORM(auxTO.overwriteOption()); + if (overwriteOption == TOverwriteMode.EMPTY) { + errorSb.append("\nTOverwriteMode could not be translated " + + "from its String representation! String: " + auxTO.overwriteOption()); + overwriteOption = null; + } + // transferProtocols + TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO.protocolList()); + if (transferProtocols.size() == 0) { + errorSb + .append("\nEmpty list of TransferProtocols " + "or could not translate TransferProtocols!"); + transferProtocols = null; // fail construction of PtPChunkData! 
+ } + // status + TReturnStatus status = null; + TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.status()); + if (code == TStatusCode.EMPTY) { + errorSb.append("\nRetrieved StatusCode was not recognised: " + auxTO.status()); + } else { + status = new TReturnStatus(code, auxTO.errString()); + } + GridUserInterface gridUser = null; + try { + if (auxTO.vomsAttributes() != null && !auxTO.vomsAttributes().trim().equals("")) { + gridUser = GridUserManager.makeVOMSGridUser(auxTO.clientDN(), auxTO.vomsAttributesArray()); + } else { + gridUser = GridUserManager.makeGridUser(auxTO.clientDN()); + } + + } catch (IllegalArgumentException e) { + log.error("Unexpected error on voms grid user creation. " + "IllegalArgumentException: {}", + e.getMessage(), e); + } + + // transferURL + /** + * whatever is read is just meaningless because PtP will fill it in!!! So create an Empty TTURL + * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy! + */ + TTURL transferURL = TTURL.makeEmpty(); + // make PtPChunkData + PtPPersistentChunkData aux = null; + try { + aux = new PtPPersistentChunkData(gridUser, rt, toSURL, pinLifetime, fileLifetime, + fileStorageType, spaceToken, expectedFileSize, transferProtocols, overwriteOption, status, + transferURL); + aux.setPrimaryKey(auxTO.primaryKey()); + } catch (InvalidPtPPersistentChunkDataAttributesException e) { + dao.fail(auxTO); + log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" + + " from persistence. Dropping chunk from request: {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } catch (InvalidPtPDataAttributesException e) { + dao.fail(auxTO); + log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" + + " from persistence. Dropping chunk from request: {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } catch (InvalidFileTransferDataAttributesException e) { + dao.fail(auxTO); + log.warn("PtP CHUNK CATALOG! 
Retrieved malformed PtP chunk data" + + " from persistence. Dropping chunk from request: {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } catch (InvalidSurlRequestDataAttributesException e) { + dao.fail(auxTO); + log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" + + " from persistence. Dropping chunk from request: {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } + // end... + return aux; + } + + /** + * + * Adds to the received PtPChunkDataTO the normalized StFN and the SURL unique ID taken from the + * PtPChunkData + * + * @param chunkTO + * @param chunk + */ + private void completeTO(ReducedPtPChunkDataTO chunkTO, final ReducedPtPChunkData chunk) { + + chunkTO.setNormalizedStFN(chunk.toSURL().normalizedStFN()); + chunkTO.setSurlUniqueID(Integer.valueOf(chunk.toSURL().uniqueId())); + } + + /** + * + * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and completes it with the + * normalized StFN and the SURL unique ID taken from the PtGChunkData + * + * @param chunkTO + * @param chunk + * @return + * @throws InvalidReducedPtPChunkDataAttributesException + */ + private ReducedPtPChunkDataTO completeTO(PtPChunkDataTO chunkTO, + final PtPPersistentChunkData chunk) throws InvalidReducedPtPChunkDataAttributesException { + + ReducedPtPChunkDataTO reducedChunkTO = this.reduce(chunkTO); + this.completeTO(reducedChunkTO, this.reduce(chunk)); + return reducedChunkTO; + } + + /** + * Creates a ReducedPtPChunkData from the data contained in the received PtPChunkData + * + * @param chunk + * @return + * @throws InvalidReducedPtPChunkDataAttributesException + */ + private ReducedPtPChunkData reduce(PtPPersistentChunkData chunk) + throws InvalidReducedPtPChunkDataAttributesException { + + ReducedPtPChunkData reducedChunk = new ReducedPtPChunkData(chunk.getSURL(), chunk.getStatus(), + chunk.fileStorageType(), chunk.fileLifetime()); + reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); + return 
reducedChunk; + } + + /** + * Creates a ReducedPtPChunkDataTO from the data contained in the received PtPChunkDataTO + * + * @param chunkTO + * @return + */ + private ReducedPtPChunkDataTO reduce(PtPChunkDataTO chunkTO) { + + ReducedPtPChunkDataTO reducedChunkTO = new ReducedPtPChunkDataTO(); + reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); + reducedChunkTO.setToSURL(chunkTO.toSURL()); + reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); + reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID()); + reducedChunkTO.setStatus(chunkTO.status()); + reducedChunkTO.setErrString(chunkTO.errString()); + return reducedChunkTO; + } + + /** + * Checks if the received PtPChunkDataTO contains the fields not set by the front end but required + * + * @param chunkTO + * @return + */ + private boolean isComplete(PtPChunkDataTO chunkTO) { + + return (chunkTO.normalizedStFN() != null) && (chunkTO.surlUniqueID() != null); + } + + public Collection lookupPtPChunkData(TSURL surl, GridUserInterface user) { + + return lookupPtPChunkData((List) Arrays.asList(new TSURL[] {surl}), user); + } + + private Collection lookupPtPChunkData(List surls, + GridUserInterface user) { + + int[] surlsUniqueIDs = new int[surls.size()]; + String[] surlsArray = new String[surls.size()]; + int index = 0; + for (TSURL tsurl : surls) { + surlsUniqueIDs[index] = tsurl.uniqueId(); + surlsArray[index] = tsurl.rawSurl(); + index++; + } + Collection chunkDataTOs = dao.find(surlsUniqueIDs, surlsArray, user.getDn()); + log.debug("PtP CHUNK CATALOG: retrieved data {}", chunkDataTOs); + return buildChunkDataList(chunkDataTOs); + } + + private Collection buildChunkDataList( + Collection chunkDataTOs) { + + Collection list = Lists.newArrayList(); + PtPPersistentChunkData chunk; + for (PtPChunkDataTO chunkTO : chunkDataTOs) { + chunk = makeOne(chunkTO); + if (chunk == null) { + continue; + } + list.add(chunk); + if (isComplete(chunkTO)) { + continue; + } + try { + dao.updateIncomplete(completeTO(chunkTO, 
chunk)); + } catch (InvalidReducedPtPChunkDataAttributesException e) { + log.warn( + "PtG CHUNK CATALOG! unable to add missing informations on " + "DB to the request: {}", + e.getMessage()); + } + } + log.debug("PtPChunkCatalog: returning {}\n\n", list); + return list; + } + + private PtPPersistentChunkData makeOne(PtPChunkDataTO chunkTO) { + + try { + return makeOne(chunkTO, new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp())); + } catch (InvalidTRequestTokenAttributesException e) { + throw new IllegalStateException( + "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " + e); + } + } + + public int updateStatus(TRequestToken requestToken, TSURL surl, TStatusCode statusCode, + String explanation) { + + return dao.updateStatus(requestToken, new int[] {surl.uniqueId()}, + new String[] {surl.rawSurl()}, statusCode, explanation); + } + + public int updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation) { + + return dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode, + explanation); + } + + public int updateFromPreviousStatus(TRequestToken requestToken, List surlList, + TStatusCode expectedStatusCode, TStatusCode newStatusCode) { + + int[] surlsUniqueIDs = new int[surlList.size()]; + String[] surls = new String[surlList.size()]; + int index = 0; + for (TSURL tsurl : surlList) { + surlsUniqueIDs[index] = tsurl.uniqueId(); + surls[index] = tsurl.rawSurl(); + index++; + } + return dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, expectedStatusCode, + newStatusCode); + } } diff --git a/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java b/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java deleted file mode 100644 index 7d9c69fc5..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java +++ /dev/null @@ -1,1683 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import static it.grid.storm.catalogs.ChunkDAOUtils.buildInClauseForArray; -import static it.grid.storm.catalogs.ChunkDAOUtils.printWarnings; -import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; -import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; -import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_LIFETIME_EXPIRED; -import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; -import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE; -import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; - -import it.grid.storm.config.Configuration; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.naming.SURL; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import org.apache.commons.lang.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import 
java.util.Map; -import java.util.Timer; -import java.util.TimerTask; - -/** - * DAO class for PtPChunkCatalog. This DAO is specifically designed to connect - * to a MySQL DB. The raw data found in those tables is pre-treated in order to - * turn it into the Object Model of StoRM. See Method comments for further info. - * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the - * object model. - * - * @author EGRID ICTP - * @version 2.0 - * @date June 2005 - */ -public class PtPChunkDAO { - - private static final Logger log = LoggerFactory.getLogger(PtPChunkDAO.class); - - /* String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /* String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getStormDbURL(); - /* String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /* String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - /* Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - - private static final PtPChunkDAO dao = new PtPChunkDAO(); - - /* timer thread that will run a task to alert when reconnecting is necessary! */ - private Timer clock = null; - /* - * timer task that will update the boolean signaling that a reconnection is - * needed - */ - private TimerTask clockTask = null; - /* milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance().getDBReconnectPeriod() * 1000; - /* initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - - /* boolean that tells whether reconnection is needed because of MySQL bug! 
*/ - private boolean reconnect = false; - - private StatusCodeConverter statusCodeConverter = StatusCodeConverter.getInstance(); - - private PtPChunkDAO() { - - setUpConnection(); - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the PtPChunkDAO. - */ - public static PtPChunkDAO getInstance() { - - return dao; - } - - /** - * Method used to save the changes made to a retrieved PtPChunkDataTO, back - * into the MySQL DB. Only the transferURL, statusCode and explanation, of - * status_Put table get written to the DB. Likewise for the pinLifetime and - * fileLifetime of request_queue. In case of any error, an error messagge gets - * logged but no exception is thrown. - */ - public synchronized void update(PtPChunkDataTO to) { - - if (!checkConnection()) { - log.error("PtP CHUNK DAO: update - unable to get a valid connection!"); - return; - } - PreparedStatement updatePut = null; - try { - // prepare statement... - updatePut = con - .prepareStatement("UPDATE " - + "request_queue rq JOIN (status_Put sp, request_Put rp) ON " - + "(rq.ID=rp.request_queueID AND sp.request_PutID=rp.ID) " - + "SET sp.transferURL=?, sp.statusCode=?, sp.explanation=?, rq.pinLifetime=?, rq.fileLifetime=?, rq.config_FileStorageTypeID=?, rq.config_OverwriteID=?, " - + "rp.normalized_targetSURL_StFN=?, rp.targetSURL_uniqueID=? 
" - + "WHERE rp.ID=?"); - printWarnings(con.getWarnings()); - - updatePut.setString(1, to.transferURL()); - printWarnings(updatePut.getWarnings()); - - updatePut.setInt(2, to.status()); - printWarnings(updatePut.getWarnings()); - - updatePut.setString(3, to.errString()); - printWarnings(updatePut.getWarnings()); - - updatePut.setInt(4, to.pinLifetime()); - printWarnings(updatePut.getWarnings()); - - updatePut.setInt(5, to.fileLifetime()); - printWarnings(updatePut.getWarnings()); - - updatePut.setString(6, to.fileStorageType()); - printWarnings(updatePut.getWarnings()); - - updatePut.setString(7, to.overwriteOption()); - printWarnings(updatePut.getWarnings()); - - updatePut.setString(8, to.normalizedStFN()); - printWarnings(updatePut.getWarnings()); - - updatePut.setInt(9, to.surlUniqueID()); - printWarnings(updatePut.getWarnings()); - - updatePut.setLong(10, to.primaryKey()); - printWarnings(updatePut.getWarnings()); - // run updateStatusPut... - log.trace("PtP CHUNK DAO - update method: {}", updatePut); - updatePut.executeUpdate(); - printWarnings(updatePut.getWarnings()); - } catch (SQLException e) { - log.error("PtP CHUNK DAO: Unable to complete update! {}", e.getMessage(), e); - } finally { - close(updatePut); - } - } - - /** - * Updates the request_Put represented by the received ReducedPtPChunkDataTO - * by setting its normalized_targetSURL_StFN and targetSURL_uniqueID - * - * @param chunkTO - */ - public synchronized void updateIncomplete(ReducedPtPChunkDataTO chunkTO) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: updateIncomplete - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_Put SET normalized_targetSURL_StFN=?, targetSURL_uniqueID=? 
" - + "WHERE ID=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - stmt.setString(1, chunkTO.normalizedStFN()); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, chunkTO.surlUniqueID()); - printWarnings(stmt.getWarnings()); - - stmt.setLong(3, chunkTO.primaryKey()); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - update incomplete: {}", stmt); - stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - } catch (SQLException e) { - log.error("PtP CHUNK DAO: Unable to complete update incomplete! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method used to refresh the PtPChunkDataTO information from the MySQL DB. - * This method is intended to be used during the srmAbortRequest/File - * operation. In case of any error, an error message gets logged but no - * exception is thrown; a null PtPChunkDataTO is returned. - */ - public synchronized PtPChunkDataTO refresh(long id) { - - if (!checkConnection()) { - log.error("PtP CHUNK DAO: refresh - unable to get a valid connection!"); - return null; - } - String prot = "SELECT tp.config_ProtocolsID FROM request_TransferProtocols tp " - + "WHERE tp.request_queueID IN " - + "(SELECT rp.request_queueID FROM request_Put rp WHERE rp.ID=?)"; - - String refresh = "SELECT rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, rq.s_token, rq.r_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode, sp.transferURL " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rq.ID=rp.request_queueID AND sp.request_PutID=rp.ID) " - + "WHERE rp.ID=?"; - - PreparedStatement stmt = null; - ResultSet rs = null; - PtPChunkDataTO chunkDataTO = null; - - try { - // get protocols for the request - stmt = con.prepareStatement(prot); - printWarnings(con.getWarnings()); - - 
List protocols = Lists.newArrayList(); - stmt.setLong(1, id); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - refresh method: {}", stmt); - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - close(rs); - close(stmt); - - // get chunk of the request - stmt = con.prepareStatement(refresh); - printWarnings(con.getWarnings()); - - stmt.setLong(1, id); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - refresh method: {}", stmt); - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - - if (rs.next()) { - chunkDataTO = new PtPChunkDataTO(); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); - } - - chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); - chunkDataTO.setProtocolList(protocols); - chunkDataTO.setStatus(rs.getInt("sp.statusCode")); - chunkDataTO.setTransferURL(rs.getString("sp.transferURL")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. 
The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. - */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - if (rs.next()) { - log.warn("ATTENTION in PtP CHUNK DAO! Possible DB corruption! " - + "refresh method invoked for specific chunk with id {}, but found " - + "more than one such chunks!", id); - } - } else { - log.warn("ATTENTION in PtP CHUNK DAO! Possible DB corruption! " - + "refresh method invoked for specific chunk with id {}, but chunk " - + "NOT found in persistence!", id); - } - } catch (SQLException e) { - log.error("PtP CHUNK DAO! Unable to refresh chunk! {}", e.getMessage(), e); - chunkDataTO = null; - } finally { - close(rs); - close(stmt); - } - return chunkDataTO; - } - - /** - * Method that queries the MySQL DB to find all entries matching the supplied - * TRequestToken. The Collection contains the corresponding PtPChunkDataTO - * objects. An initial simple query establishes the list of protocols - * associated with the request. A second complex query establishes all chunks - * associated with the request, by properly joining request_queue, request_Put - * and status_Put. The considered fields are: (1) From status_Put: the ID - * field which becomes the TOs primary key, and statusCode. (2) From - * request_Put: targetSURL and expectedFileSize. (3) From request_queue: - * pinLifetime, fileLifetime, config_FileStorageTypeID, s_token, - * config_OverwriteID. In case of any error, a log gets written and an empty - * collection is returned. No exception is returned. NOTE! Chunks in - * SRM_ABORTED status are NOT returned! 
This is important because this method - * is intended to be used by the Feeders to fetch all chunks in the request, - * and aborted chunks should not be picked up for processing! - */ - public synchronized Collection find(TRequestToken requestToken) { - - if (!checkConnection()) { - log.error("PtP CHUNK DAO: find - unable to get a valid connection!"); - return null; - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - str = "SELECT tp.config_ProtocolsID " - + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " - + "WHERE rq.r_token=?"; - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List protocols = Lists.newArrayList(); - find.setString(1, strToken); - printWarnings(find.getWarnings()); - - log.trace("PtP CHUNK DAO - find method: {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - close(rs); - close(find); - - // get chunks of the request - str = "SELECT rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE rq.r_token=? 
AND sp.statusCode<>?"; - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List list = Lists.newArrayList(); - find.setString(1, strToken); - printWarnings(find.getWarnings()); - - find.setInt(2, - statusCodeConverter.toDB(SRM_ABORTED)); - printWarnings(find.getWarnings()); - - log.trace("PtP CHUNK DAO - find method: {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - PtPChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new PtPChunkDataTO(); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); - } - - chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); - chunkDataTO.setProtocolList(protocols); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setStatus(rs.getInt("sp.statusCode")); - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedPtPChunkDataTO associated to the - * given TRequestToken expressed as String. 
- */ - public synchronized Collection findReduced( - String reqtoken, Collection surls) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: findReduced - unable to get a valid connection!"); - return Lists.newArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - boolean addInClause = surls != null && !surls.isEmpty(); - try { - // get reduced chunks - String str = "SELECT rq.fileLifetime, rq.config_FileStorageTypeID, rp.ID, rp.targetSURL, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE rq.r_token=?"; - if (addInClause) { - str += " AND rp.targetSURL_uniqueID IN ("; - for (int i=0; i list = Lists.newArrayList(); - find.setString(1, reqtoken); - printWarnings(find.getWarnings()); - if (addInClause) { - Iterator iterator = surls.iterator(); - int start = 2; - while (iterator.hasNext()) { - TSURL surl = iterator.next(); - find.setInt(start++, surl.uniqueId()); - } - } - printWarnings(find.getWarnings()); - log.trace("PtP CHUNK DAO! 
findReduced with request token; {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtPChunkDataTO reducedChunkDataTO = null; - while (rs.next()) { - reducedChunkDataTO = new ReducedPtPChunkDataTO(); - reducedChunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - reducedChunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - reducedChunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - reducedChunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - reducedChunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - reducedChunkDataTO.setSurlUniqueID(uniqueID); - } - - reducedChunkDataTO.setStatus(rs.getInt("sp.statusCode")); - list.add(reducedChunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedPtPChunkDataTO corresponding to - * the IDs supplied in the given List of Long. If the List is null or empty, - * an empty collection is returned and error messages get logged. 
- */ - public synchronized Collection findReduced( - List ids) { - - if (ids != null && !ids.isEmpty()) { - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: findReduced - unable to get a valid connection!"); - return Lists.newArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get reduced chunks - String str = "SELECT rq.fileLifetime, rq.config_FileStorageTypeID, rp.ID, rp.targetSURL, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE rp.ID IN (" + StringUtils.join(ids.toArray(), ',') + ")"; - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List list = Lists.newArrayList(); - log.trace("PtP CHUNK DAO! fetchReduced; {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtPChunkDataTO reducedChunkDataTO = null; - while (rs.next()) { - reducedChunkDataTO = new ReducedPtPChunkDataTO(); - reducedChunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - reducedChunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - reducedChunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - reducedChunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - reducedChunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - reducedChunkDataTO.setSurlUniqueID(uniqueID); - } - - reducedChunkDataTO.setStatus(rs.getInt("sp.statusCode")); - list.add(reducedChunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection */ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } else { - log.warn("ATTENTION in PtP CHUNK DAO! 
fetchReduced " - + "invoked with null or empty list of IDs!"); - return Lists.newArrayList(); - } - } - - /** - * Method used in extraordinary situations to signal that data retrieved from - * the DB was malformed and could not be translated into the StoRM object - * model. This method attempts to change the status of the chunk to - * SRM_FAILURE and record it in the DB, in the status_Put table. This - * operation could potentially fail because the source of the malformed - * problems could be a problematic DB; indeed, initially only log messages - * were recorded. Yet it soon became clear that the source of malformed data - * were actually the clients themselves and/or FE recording in the DB. In - * these circumstances the client would find its request as being in the - * SRM_IN_PROGRESS state for ever. Hence the pressing need to inform it of the - * encountered problems. - */ - public synchronized void signalMalformedPtPChunk(PtPChunkDataTO auxTO) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: signalMalformedPtPChunk - unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE status_Put sp SET sp.statusCode=" - + statusCodeConverter.toDB(SRM_FAILURE) - + ", sp.explanation=? " + "WHERE sp.request_PutID=" + auxTO.primaryKey(); - PreparedStatement signal = null; - try { - signal = con.prepareStatement(signalSQL); - printWarnings(con.getWarnings()); - /* NB: Prepared statement spares DB-specific String notation! */ - signal.setString(1, "This chunk of the request is malformed!"); - printWarnings(signal.getWarnings()); - - log.trace("PtP CHUNK DAO - signalMalformedPtPChunk method: {}", signal); - signal.executeUpdate(); - printWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("PtPChunkDAO! Unable to signal in DB that a chunk of " - + "the request was malformed! 
Request: {}; Error: {}", auxTO.toString(), - e.getMessage(), e); - } finally { - close(signal); - } - } - - /** - * Method that returns the number of Put requests on the given SURL, that are - * in SRM_SPACE_AVAILABLE state. This method is intended to be used by - * PtPChunkCatalog in the isSRM_SPACE_AVAILABLE method invocation. In case of - * any error, 0 is returned. - */ - public synchronized int numberInSRM_SPACE_AVAILABLE(int surlUniqueID) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: numberInSRM_SPACE_AVAILABLE - unable to get a valid connection!"); - return 0; - } - - String str = "SELECT COUNT(rp.ID) FROM status_Put sp JOIN request_Put rp " - + "ON (sp.request_PutID=rp.ID) " - + "WHERE rp.targetSURL_uniqueID=? AND sp.statusCode=?"; - PreparedStatement stmt = null; - ResultSet rs = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - /* Prepared statement spares DB-specific String notation! */ - stmt.setInt(1, surlUniqueID); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2,statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - numberInSRM_SPACE_AVAILABLE method: {}", stmt); - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - - int numberSpaceAvailable = 0; - if (rs.next()) { - numberSpaceAvailable = rs.getInt(1); - } - return numberSpaceAvailable; - } catch (SQLException e) { - log.error("PtPChunkDAO! Unable to determine " - + "numberInSRM_SPACE_AVAILABLE! Returning 0! {}", e.getMessage(), e); - return 0; - } finally { - close(rs); - close(stmt); - } - } - - /** - * Method that retrieves all expired requests in SRM_SPACE_AVAILABLE state. 
- * - * @return a Map containing the ID of the request as key and the relative - * SURL as value - */ - public synchronized Map getExpiredSRM_SPACE_AVAILABLE() { - - Map ids = Maps.newHashMap(); - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: getExpiredSRM_SPACE_AVAILABLE - unable to get a valid connection!"); - return ids; - } - - String idsstr = "SELECT rp.ID, rp.targetSURL FROM " - + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "WHERE sp.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - PreparedStatement stmt = null; - ResultSet rs = null; - - try { - stmt = con.prepareStatement(idsstr); - printWarnings(con.getWarnings()); - - stmt.setInt(1, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - getExpiredSRM_SPACE_AVAILABLE: {}", stmt); - - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - - while (rs.next()) { - ids.put(rs.getLong("rp.ID"), rs.getString("rp.targetSURL")); - } - } catch (SQLException e) { - log.error("PtPChunkDAO! Unable to select expired " - + "SRM_SPACE_AVAILABLE chunks of PtP requests. {}", e.getMessage(), e); - - } finally { - close(rs); - close(stmt); - } - return ids; - } - - /** - * Method that retrieves all ptp requests in SRM_REQUEST_INPROGRESS state which can be - * considered as expired. 
- * - * @return a Map containing the ID of the request as key and the involved array of SURLs as - * value - */ - public synchronized List getExpiredSRM_REQUEST_INPROGRESS(long expirationTime) { - - List ids = Lists.newArrayList(); - - if (!checkConnection()) { - log.error( - "PtP CHUNK DAO: getExpiredSRM_REQUEST_INPROGRESS - unable to get a valid connection!"); - return ids; - } - - String query = "SELECT rq.ID FROM request_queue rq, request_Put rp, status_Put sp " - + "WHERE rq.ID = rp.request_queueID and rp.ID = sp.request_PutID " - + "AND rq.status=? AND rq.timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? SECOND)"; - - PreparedStatement stmt = null; - ResultSet rs = null; - - try { - stmt = con.prepareStatement(query); - printWarnings(con.getWarnings()); - - stmt.setLong(1, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); - printWarnings(stmt.getWarnings()); - - stmt.setLong(2, expirationTime); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - getExpiredSRM_REQUEST_INPROGRESS: {}", stmt); - - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - - while (rs.next()) { - ids.add(rs.getLong("rq.ID")); - } - } catch (SQLException e) { - log.error( - "PtPChunkDAO! Unable to select expired " - + "SRM_REQUEST_INPROGRESS chunks of PtP requests. {}", - e.getMessage(), e); - - } finally { - close(rs); - close(stmt); - } - return ids; - } - - /** - * Method that updates chunks in SRM_SPACE_AVAILABLE state, into SRM_SUCCESS. - * An array of long representing the primary key of each chunk is required. - * This is needed when the client invokes srmPutDone() In case of any error - * nothing happens and no exception is thrown, but proper messages get - * logged. 
- */ - public synchronized void transitSRM_SPACE_AVAILABLEtoSRM_SUCCESS(List ids) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: transitSRM_SPACE_AVAILABLEtoSRM_SUCCESS - unable to get a valid connection!"); - return; - } - - String str = "UPDATE " - + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=? " + "WHERE sp.statusCode=? AND rp.ID IN (" - + StringUtils.join(ids.toArray(), ',') + ")"; - - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - stmt.setInt(1, - statusCodeConverter.toDB(SRM_SUCCESS)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - " - + "transitSRM_SPACE_AVAILABLEtoSRM_SUCCESS: {}", stmt); - - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - - if (count == 0) { - log.trace("PtPChunkDAO! No chunk of PtP request was " - + "transited from SRM_SPACE_AVAILABLE to SRM_SUCCESS."); - } else { - log.info("PtPChunkDAO! {} chunks of PtP requests were transited " - + "from SRM_SPACE_AVAILABLE to SRM_SUCCESS.", count); - } - } catch (SQLException e) { - log.error("PtPChunkDAO! Unable to transit chunks from " - + "SRM_SPACE_AVAILABLE to SRM_SUCCESS! {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that updates chunks in SRM_SPACE_AVAILABLE state, into - * SRM_FILE_LIFETIME_EXPIRED. An array of Long representing the primary key - * of each chunk is required. This is needed when the client forgets to invoke - * srmPutDone(). In case of any error or exception, the returned int value - * will be zero or less than the input List size. 
- * - * @param the list of the request id to update - * - * @return The number of the updated records into the db - */ - public synchronized int transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED(Collection ids) { - - Preconditions.checkNotNull(ids, "Invalid list of id"); - - if (!checkConnection()) { - log.error("Unable to get a valid connection to the database!"); - return 0; - } - - String querySQL = "UPDATE status_Put sp " - + "JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=?, sp.explanation=? " - + "WHERE sp.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - - if (!ids.isEmpty()) { - querySQL += "AND rp.ID IN (" + StringUtils.join(ids.toArray(), ',') + ")"; - } - - PreparedStatement stmt = null; - int count = 0; - try { - stmt = con.prepareStatement(querySQL); - printWarnings(con.getWarnings()); - - stmt.setInt(1, statusCodeConverter.toDB(SRM_FILE_LIFETIME_EXPIRED)); - printWarnings(stmt.getWarnings()); - - stmt.setString(2, "Expired pinLifetime"); - printWarnings(stmt.getWarnings()); - - stmt.setInt(3, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - log.trace( - "PtP CHUNK DAO - transit SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED: {}", - stmt); - - count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - - } catch (SQLException e) { - log.error( - "PtPChunkDAO! Unable to transit chunks from " - + "SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - log.trace("PtPChunkDAO! {} chunks of PtP requests were transited " - + "from SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED.", count); - return count; - } - - /** - * Method that updates enqueued requests selected by id into SRM_FAILURE. - * An array of Long representing the id of each request is required. 
- * - * @param the list of the request id to update - * - * @return The number of the updated records. Zero or less than the input list size in case of errors. - */ - public synchronized int transitExpiredSRM_REQUEST_INPROGRESStoSRM_FAILURE(Collection ids) { - - Preconditions.checkNotNull(ids, "Invalid list of id"); - - if (ids.isEmpty()) { - return 0; - } - - if (!checkConnection()) { - log.error("Unable to get a valid connection to the database!"); - return 0; - } - - String querySQL = "UPDATE request_queue rq, request_Put rp, status_Put sp " - + "SET rq.status=?, sp.statusCode=?, sp.explanation=? " - + "WHERE rq.ID = rp.request_queueID and rp.ID = sp.request_PutID " - + "AND rq.status=? AND rq.ID IN (" + buildInClauseForArray(ids.size()) + ")"; - - PreparedStatement stmt = null; - int count = 0; - try { - stmt = con.prepareStatement(querySQL); - printWarnings(con.getWarnings()); - - stmt.setInt(1, statusCodeConverter.toDB(SRM_FAILURE)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, statusCodeConverter.toDB(SRM_FAILURE)); - printWarnings(stmt.getWarnings()); - - stmt.setString(3, "Request expired"); - printWarnings(stmt.getWarnings()); - - stmt.setInt(4, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); - printWarnings(stmt.getWarnings()); - - int i = 5; - for (Long id: ids) { - stmt.setLong(i, id); - printWarnings(stmt.getWarnings()); - i++; - } - - log.trace( - "PtP CHUNK DAO - transit SRM_REQUEST_INPROGRESS to SRM_FAILURE: {}", - stmt); - - count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - - } catch (SQLException e) { - log.error( - "PtPChunkDAO! Unable to transit chunks from " - + "SRM_REQUEST_INPROGRESS to SRM_FAILURE! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - log.trace("PtPChunkDAO! 
{} chunks of PtP requests were transited " - + "from SRM_REQUEST_INPROGRESS to SRM_FAILURE.", count); - return count; - - } - - /** - * Method that transit chunks in SRM_SPACE_AVAILABLE to SRM_ABORTED, for the - * given SURL: the overall request status of the requests containing that - * chunk, is not changed! The TURL is set to null. Beware, that the chunks may - * be part of requests that have finished, or that still have not finished - * because other chunks are still being processed. - */ - public synchronized void transitSRM_SPACE_AVAILABLEtoSRM_ABORTED( - int surlUniqueID, String surl, String explanation) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: transitSRM_SPACE_AVAILABLEtoSRM_ABORTED - unable to get a valid connection!"); - return; - } - String str = "UPDATE " - + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=?, sp.explanation=?, sp.transferURL=NULL " - + "WHERE sp.statusCode=? AND (rp.targetSURL_uniqueID=? OR rp.targetSURL=?)"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - stmt.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); - printWarnings(stmt.getWarnings()); - - stmt.setString(2, explanation); - printWarnings(stmt.getWarnings()); - - stmt.setInt(3, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(4, surlUniqueID); - printWarnings(stmt.getWarnings()); - - stmt.setString(5, surl); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - " - + "transitSRM_SPACE_AVAILABLEtoSRM_ABORTED: {}", stmt); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - - if (count > 0) { - log.info("PtP CHUNK DAO! {} chunks were transited from " - + "SRM_SPACE_AVAILABLE to SRM_ABORTED.", count); - } else { - log.trace("PtP CHUNK DAO! 
No chunks " - + "were transited from SRM_SPACE_AVAILABLE to SRM_ABORTED."); - } - } catch (SQLException e) { - log.error("PtP CHUNK DAO! Unable to " - + "transitSRM_SPACE_AVAILABLEtoSRM_ABORTED! {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("PTP CHUNK DAO! Unable to close Statement {} - Error: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("PTP CHUNK DAO! Unable to close ResultSet! Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary method that sets up the connection to the DB. - */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - printWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (ClassNotFoundException | SQLException e) { - log.error("PTP CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("PTP CHUNK DAO! Reconnecting to DB! "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that takes down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("PTP CHUNK DAO! Exception in takeDownConnection method - " - + "could not close connection! 
{}", e.getMessage(), e); - } - } - } - - public synchronized int updateStatus(int[] surlsUniqueIDs, String[] surls, - TStatusCode statusCode, String explanation) { - - if (explanation == null) { - throw new IllegalArgumentException("Unable to perform the updateStatus, " - + "invalid arguments: explanation=" + explanation); - } - return doUpdateStatus(null, surlsUniqueIDs, surls, statusCode, explanation, false, - true); - } - - public synchronized int updateStatus(TRequestToken requestToken, - int[] surlsUniqueIDs, String[] surls, TStatusCode statusCode, - String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException("Unable to perform the updateStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - return doUpdateStatus(requestToken, surlsUniqueIDs, surls, statusCode, - explanation, true, true); - } - - private int doUpdateStatus(TRequestToken requestToken, int[] surlsUniqueIDs, - String[] surls, TStatusCode statusCode, String explanation, - boolean withRequestToken, boolean withExplaination) - throws IllegalArgumentException { - - if ((withRequestToken && requestToken == null) - || (withExplaination && explanation == null)) { - throw new IllegalArgumentException("Unable to perform the updateStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withExplaination=" - + withExplaination + " explaination=" + explanation); - } - if (!checkConnection()) { - log - .error("PTP CHUNK DAO: updateStatus - unable to get a valid connection!"); - return 0; - } - String str = "UPDATE status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND " - + "rp.request_queueID=rq.ID " + "SET sp.statusCode=? 
"; - if (withExplaination) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE "; - if (withRequestToken) { - str += buildTokenWhereClause(requestToken) + " AND "; - } - str += " ( rp.targetSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rp.targetSURL IN " - + makeSurlString(surls) + " ) "; - PreparedStatement stmt = null; - int count = 0; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, statusCodeConverter.toDB(statusCode)); - printWarnings(stmt.getWarnings()); - - log.trace("PTP CHUNK DAO - updateStatus: {}", stmt); - count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PTP CHUNK DAO! No chunk of PTP request was updated to {}.", - statusCode); - } else { - log.info("PTP CHUNK DAO! {} chunks of PTP requests were updated " - + "to {}.", count, statusCode); - } - } catch (SQLException e) { - log.error("PTP CHUNK DAO! Unable to updated from to {}! {}", statusCode, - e.getMessage(), e); - } finally { - close(stmt); - } - return count; - } - - public synchronized int updateStatusOnMatchingStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - return doUpdateStatusOnMatchingStatus(requestToken, null, null, - expectedStatusCode, newStatusCode, explanation, true, false, true); - } - - public synchronized int updateStatusOnMatchingStatus(int[] surlsUniqueIDs, - String[] surls, TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - if (surlsUniqueIDs == null || surls == null || explanation == null - || surlsUniqueIDs.length == 0 || surls.length == 0 - || 
surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + " surls=" - + surls + " explanation=" + explanation); - } - return doUpdateStatusOnMatchingStatus(null, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, explanation, false, true, true); - } - - public synchronized int updateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0 - || surls.length == 0 || surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken - + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls); - } - return doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, null, true, true, false); - } - - private int doUpdateStatusOnMatchingStatus(TRequestToken requestToken, - int[] surlsUniqueIDs, String[] surls, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation, boolean withRequestToken, - boolean withSurls, boolean withExplanation) { - - if ((withRequestToken && requestToken == null) - || (withExplanation && explanation == null) - || (withSurls && (surlsUniqueIDs == null || surls == null))) { - throw new IllegalArgumentException( - "Unable to perform the doUpdateStatusOnMatchingStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withSurls=" + withSurls - + " surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls - + " withExplaination=" + withExplanation + " explanation=" - + explanation); - } - if (!checkConnection()) { - log - 
.error("PTP CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!"); - return 0; - } - String str = "UPDATE " - + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=? "; - if (withExplanation) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE sp.statusCode=? "; - if (withRequestToken) { - str += " AND " + buildTokenWhereClause(requestToken); - } - if (withSurls) { - str += " AND " + buildSurlsWhereClause(surlsUniqueIDs, surls); - } - - int count = 0; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, statusCodeConverter.toDB(newStatusCode)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, statusCodeConverter.toDB(expectedStatusCode)); - printWarnings(stmt.getWarnings()); - - log.trace("PTP CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt); - count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PTP CHUNK DAO! No chunk of PTP request was updated " - + "from {} to {}.", expectedStatusCode, newStatusCode); - } else { - log.debug("PTP CHUNK DAO! {} chunks of PTP requests were updated " - + "from {} to {}.", count, expectedStatusCode, newStatusCode); - } - } catch (SQLException e) { - log.error("PTP CHUNK DAO! Unable to updated from {} to {}! 
Error: {}", - expectedStatusCode, newStatusCode, e.getMessage(), e); - } finally { - close(stmt); - } - return count; - } - - public Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn) { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0 || dn == null) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " dn=" + dn); - } - return find(surlsUniqueIDs, surlsArray, dn, true); - } - - public Collection find(int[] surlsUniqueIDs, String[] surlsArray) { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray); - } - return find(surlsUniqueIDs, surlsArray, null, false); - } - - - private List chunkTOfromResultSet(ResultSet rs) - throws SQLException{ - - List results = Lists.newArrayList(); - while (rs.next()) { - - PtPChunkDataTO chunkDataTO = new PtPChunkDataTO(); - - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); - } - - chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setStatus(rs.getInt("sp.statusCode")); - results.add(chunkDataTO); - } - - return results; - } - - - - public synchronized List findActivePtPsOnSURLs(List surls){ - - if (surls == null || surls.isEmpty()){ - throw new IllegalArgumentException("cannot find active active " - + "PtPs for an empty or null list of SURLs!"); - } - - ResultSet rs = null; - PreparedStatement stat = null; - - try { - String query = "SELECT rq.ID, rq.r_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, " - + "rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, " - + "sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE ( rp.targetSURL in "+ makeSurlString((String[])surls.toArray()) +" )" - + "AND sp.statusCode = 24"; - - stat = con.prepareStatement(query); - printWarnings(con.getWarnings()); - - rs = stat.executeQuery(); - List results = chunkTOfromResultSet(rs); - - return results; - - } catch (SQLException e) { - - log.error("findActivePtPsOnSURLs(): SQL Error: {}", e.getMessage(),e); - return Collections.emptyList(); - - } finally { - close(rs); - close(stat); - } - } - - - public 
synchronized List findActivePtPsOnSURL(String surl) { - return findActivePtPsOnSURL(surl, null); - } - - public synchronized List findActivePtPsOnSURL(String surl, - String currentRequestToken) { - - if (surl == null || surl.isEmpty()) { - throw new IllegalArgumentException("cannot find active active " - + "PtPs for an empty or null SURL!"); - } - - ResultSet rs = null; - PreparedStatement stat = null; - - try { - - String query = "SELECT rq.ID, rq.r_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, " - + "rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, " - + "sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE ( rp.targetSURL = ? and sp.statusCode=24 )"; - - if (currentRequestToken != null){ - query += "AND rq.r_token != ?"; - } - - stat = con.prepareStatement(query); - printWarnings(con.getWarnings()); - - stat.setString(1, surl); - - if (currentRequestToken != null){ - stat.setString(2, currentRequestToken); - } - - rs = stat.executeQuery(); - List results = chunkTOfromResultSet(rs); - - return results; - - } catch (SQLException e) { - - log.error("findActivePtPsOnSURL(): SQL Error: {}", e.getMessage(),e); - return Collections.emptyList(); - - } finally { - close(rs); - close(stat); - } - - } - - private synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn, boolean withDn) - throws IllegalArgumentException { - - if ((withDn && dn == null) || surlsUniqueIDs == null - || surlsUniqueIDs.length == 0 || surlsArray == null - || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); - } - if (!checkConnection()) { - log.error("PtP 
CHUNK DAO: find - unable to get a valid connection!"); - return Lists.newArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get chunks of the request - String str = "SELECT rq.ID, rq.r_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, " - + "rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, " - + "sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE ( rp.targetSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rp.targetSURL IN " - + makeSurlString(surlsArray) + " )"; - - if (withDn) { - str += " AND rq.client_dn=\'" + dn + "\'"; - } - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List list = Lists.newArrayList(); - - log.trace("PtP CHUNK DAO - find method: {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - PtPChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new PtPChunkDataTO(); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); - } - - chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setStatus(rs.getInt("sp.statusCode")); - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } - - public synchronized List findProtocols(long requestQueueId) { - - if (!checkConnection()) { - log.error("PtP CHUNK DAO: find - unable to get a valid connection!"); - return Lists.newArrayList(); - } - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - str = "SELECT tp.config_ProtocolsID " - + "FROM request_TransferProtocols tp " + "WHERE tp.request_queueID=?"; - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List protocols = Lists.newArrayList(); - find.setLong(1, requestQueueId); - printWarnings(find.getWarnings()); - - log.trace("PtP CHUNK DAO - findProtocols method: {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - - return protocols; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! 
*/ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } - - private String buildExpainationSet(String explanation) { - - return " sp.explanation='" + explanation + "' "; - } - - private String buildTokenWhereClause(TRequestToken requestToken) { - - return " rq.r_token='" + requestToken.toString() + "' "; - } - - private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { - - return " ( rp.targetSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rp.targetSURL IN " - + makeSurlString(surls) + " ) "; - } - - /** - * Method that returns a String containing all Surl's IDs. - */ - private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { - - StringBuilder sb = new StringBuilder("("); - for (int i = 0; i < surlUniqueIDs.length; i++) { - if (i > 0) { - sb.append(","); - } - sb.append(surlUniqueIDs[i]); - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surls. - */ - private String makeSurlString(String[] surls) { - - StringBuilder sb = new StringBuilder("("); - int n = surls.length; - - for (int i = 0; i < n; i++) { - - SURL requestedSURL; - - try { - requestedSURL = SURL.makeSURLfromString(surls[i]); - } catch (NamespaceException e) { - log.error(e.getMessage(), e); - log.debug("Skip '{}' during query creation", surls[i]); - continue; - } - - sb.append("'"); - sb.append(requestedSURL.getNormalFormAsString()); - sb.append("','"); - sb.append(requestedSURL.getQueryFormAsString()); - sb.append("'"); - - if (i < (n - 1)) { - sb.append(","); - } - } - - sb.append(")"); - return sb.toString(); - } - -} \ No newline at end of file diff --git a/src/main/java/it/grid/storm/catalogs/PtPChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/PtPChunkDataTO.java deleted file mode 100644 index 9f1a5c7a3..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtPChunkDataTO.java +++ /dev/null @@ -1,342 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica 
Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.config.Configuration; -import it.grid.storm.namespace.model.Protocol; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Timestamp; -import java.util.List; - -/** - * Class that represents a row in the Persistence Layer: this is all raw data - * referring to the PtPChunkData proper, that is, String and primitive types. - * - * Each field is initialized with default values as per SRM 2.2 specification: - * protocolList GSIFTP fileStorageType VOLATILE overwriteMode NEVER status - * SRM_REQUEST_QUEUED - * - * All other fields are 0 if int, or a white space if String. 
- * - * @author EGRID ICTP - * @version 2.0 - * @date June 2005 - */ -public class PtPChunkDataTO { - - private static final String FQAN_SEPARATOR = "#"; - /* Database table request_Get fields BEGIN */ - private long primaryKey = -1; // ID primary key of status_Put record in DB - private String toSURL = " "; - private long expectedFileSize = 0; - private String normalizedStFN = null; - private Integer surlUniqueID = null; - /* Database table request_Get fields END */ - - private String requestToken = " "; - private int pinLifetime = -1; - private int fileLifetime = -1; - private String fileStorageType = null; // initialised in constructor - private String spaceToken = " "; - private List protocolList = null; // initialised in constructor - private String overwriteOption = null; // initialised in constructor - private int status; // initialised in constructor - private String errString = " "; - private String turl = " "; - private Timestamp timeStamp = null; - - private String clientDN = null; - private String vomsAttributes = null; - - - public PtPChunkDataTO() { - - this.fileStorageType = FileStorageTypeConverter.getInstance().toDB( - TFileStorageType.getTFileStorageType(Configuration.getInstance() - .getDefaultFileStorageType())); - TURLPrefix protocolPreferences = new TURLPrefix(); - protocolPreferences.addProtocol(Protocol.GSIFTP); - this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); - this.overwriteOption = OverwriteModeConverter.getInstance().toDB( - TOverwriteMode.NEVER); - this.status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - } - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public String requestToken() { - - return requestToken; - } - - public void setRequestToken(String s) { - - requestToken = s; - } - - public Timestamp timeStamp() { - - return timeStamp; - } - - public void setTimeStamp(Timestamp timeStamp) { - - 
this.timeStamp = timeStamp; - } - - public String toSURL() { - - return toSURL; - } - - public void setToSURL(String s) { - - toSURL = s; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer surlUniqueID() { - - return surlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setSurlUniqueID(Integer surlUniqueID) { - - this.surlUniqueID = surlUniqueID; - } - - public int pinLifetime() { - - return pinLifetime; - } - - public void setPinLifetime(int n) { - - pinLifetime = n; - } - - public int fileLifetime() { - - return fileLifetime; - } - - public void setFileLifetime(int n) { - - fileLifetime = n; - } - - public String fileStorageType() { - - return fileStorageType; - } - - /** - * Method that sets the FileStorageType: if it is null nothing gets set. The - * deafult value is Permanent. - */ - public void setFileStorageType(String s) { - - if (s != null) - fileStorageType = s; - } - - public String spaceToken() { - - return spaceToken; - } - - public void setSpaceToken(String s) { - - spaceToken = s; - } - - public long expectedFileSize() { - - return expectedFileSize; - } - - public void setExpectedFileSize(long l) { - - expectedFileSize = l; - } - - public List protocolList() { - - return protocolList; - } - - public void setProtocolList(List l) { - - if ((l != null) && (!l.isEmpty())) - protocolList = l; - } - - public String overwriteOption() { - - return overwriteOption; - } - - /** - * Method that sets the OverwriteMode: if it is null nothing gets set. The - * deafult value is Never. 
- */ - public void setOverwriteOption(String s) { - - if (s != null) - overwriteOption = s; - } - - public int status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String transferURL() { - - return turl; - } - - public void setTransferURL(String s) { - - turl = s; - } - - public String clientDN() { - - return clientDN; - } - - public void setClientDN(String s) { - - clientDN = s; - } - - public String vomsAttributes() { - - return vomsAttributes; - } - - public void setVomsAttributes(String s) { - - vomsAttributes = s; - } - - public void setVomsAttributes(String[] fqaNsAsString) { - - vomsAttributes = ""; - for (int i = 0; i < fqaNsAsString.length; i++) { - vomsAttributes += fqaNsAsString[i]; - if (i < fqaNsAsString.length - 1) { - vomsAttributes += FQAN_SEPARATOR; - } - } - - } - - public String[] vomsAttributesArray() { - - return vomsAttributes.split(FQAN_SEPARATOR); - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(requestToken); - sb.append(" "); - sb.append(toSURL); - sb.append(" "); - sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(pinLifetime); - sb.append(" "); - sb.append(fileLifetime); - sb.append(" "); - sb.append(fileStorageType); - sb.append(" "); - sb.append(spaceToken); - sb.append(" "); - sb.append(expectedFileSize); - sb.append(" "); - sb.append(protocolList); - sb.append(" "); - sb.append(overwriteOption); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - sb.append(turl); - return sb.toString(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/PtPData.java b/src/main/java/it/grid/storm/catalogs/PtPData.java deleted file mode 100644 index bd6ce75b7..000000000 --- 
a/src/main/java/it/grid/storm/catalogs/PtPData.java +++ /dev/null @@ -1,59 +0,0 @@ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; - -public interface PtPData extends FileTransferData { - - /** - * Method that returns the space token supplied for this chunk of the srm - * request. - */ - public TSpaceToken getSpaceToken(); - - /** - * Method that returns the requested pin life time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds pinLifetime(); - - /** - * Method that returns the requested file life time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds fileLifetime(); - - /** - * Method that returns the fileStorageType for this chunk of the srm request. - */ - public TFileStorageType fileStorageType(); - - /** - * Method that returns the knownSizeOfThisFile supplied with this chunk of the - * srm request. - */ - public TSizeInBytes expectedFileSize(); - - /** - * Method that returns the overwriteOption specified in the srm request. - */ - public TOverwriteMode overwriteOption(); - - /** - * Method that sets the status of this request to SRM_SPACE_AVAILABLE; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_SPACE_AVAILABLE(String explanation); - - /** - * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. 
- */ - public void changeStatusSRM_DUPLICATION_ERROR(String explanation); - -} diff --git a/src/main/java/it/grid/storm/catalogs/PtPPersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/PtPPersistentChunkData.java deleted file mode 100644 index beee449d0..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtPPersistentChunkData.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ - -package it.grid.storm.catalogs; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents a PrepareToPutChunkData, that is part of a multifile - * PrepareToPut srm request. 
It contains data about: the requestToken, the - * toSURL, the requested lifeTime of pinning, the requested lifetime of - * volatile, the requested fileStorageType and any available spaceToken, the - * expectedFileSize, the desired transferProtocols in order of preference, the - * overwriteOption to be applied in case the file already exists, the - * transferURL for the supplied SURL. - * - * @author EGRID - ICTP Trieste - * @date June, 2005 - * @version 2.0 - */ -public class PtPPersistentChunkData extends IdentityPtPData implements - PersistentChunkData { - - private static final Logger log = LoggerFactory - .getLogger(PtPPersistentChunkData.class); - - /** - * long representing the primary key for the persistence layer, in the - * status_Put table - */ - private long primaryKey = -1; - - /** - * This is the requestToken of the multifile srm request to which this chunk - * belongs - */ - private final TRequestToken requestToken; - - public PtPPersistentChunkData(GridUserInterface auth, - TRequestToken requestToken, TSURL toSURL, TLifeTimeInSeconds pinLifetime, - TLifeTimeInSeconds fileLifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TSizeInBytes expectedFileSize, - TURLPrefix transferProtocols, TOverwriteMode overwriteOption, - TReturnStatus status, TTURL transferURL) - throws InvalidPtPPersistentChunkDataAttributesException, - InvalidPtPDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(auth, toSURL, pinLifetime, fileLifetime, fileStorageType, spaceToken, - expectedFileSize, transferProtocols, overwriteOption, status, transferURL); - if (requestToken == null) { - log.debug("PtPPersistentChunkData: requestToken is null!"); - throw new InvalidPtPPersistentChunkDataAttributesException(requestToken, - toSURL, pinLifetime, fileLifetime, fileStorageType, spaceToken, - expectedFileSize, transferProtocols, overwriteOption, status, - transferURL); - } - this.requestToken = 
requestToken; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - @Override - public long getPrimaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! - */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the requestToken of the srm request to which this chunk - * belongs. - */ - @Override - public TRequestToken getRequestToken() { - - return requestToken; - } - - @Override - public long getIdentifier() { - - return getPrimaryKey(); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = super.hashCode(); - result = prime * result + (int) (primaryKey ^ (primaryKey >>> 32)); - result = prime * result - + ((requestToken == null) ? 0 : requestToken.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - PtPPersistentChunkData other = (PtPPersistentChunkData) obj; - if (primaryKey != other.primaryKey) { - return false; - } - if (requestToken == null) { - if (other.requestToken != null) { - return false; - } - } else if (!requestToken.equals(other.requestToken)) { - return false; - } - return true; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("PtPPersistentChunkData [primaryKey="); - builder.append(primaryKey); - builder.append(", requestToken="); - builder.append(requestToken); - builder.append(", spaceToken="); - builder.append(spaceToken); - builder.append(", pinLifetime="); - builder.append(pinLifetime); - builder.append(", 
fileLifetime="); - builder.append(fileLifetime); - builder.append(", fileStorageType="); - builder.append(fileStorageType); - builder.append(", overwriteOption="); - builder.append(overwriteOption); - builder.append(", expectedFileSize="); - builder.append(expectedFileSize); - builder.append(", transferProtocols="); - builder.append(transferProtocols); - builder.append(", SURL="); - builder.append(SURL); - builder.append(", status="); - builder.append(status); - builder.append(", transferURL="); - builder.append(transferURL); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkData.java b/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkData.java deleted file mode 100644 index 461a5d8fe..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkData.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a ReducedBringOnLineChunkData, that is part of a - * multifile PrepareToGet srm request. 
It is closely related to BoLChunkData but - * it is called Reduced because it only contains the fromSURL, the current - * TReturnStatus, and the primary key of the request. - * - * This class is intended to be used by srmReleaseFiles, where only a limited - * amunt of information is needed instead of full blown BoLChunkData. - * - * @author CNAF - * @date Aug 2009 - * @version 1.0 - */ -public class ReducedBoLChunkData implements ReducedChunkData { - - @SuppressWarnings("unused") - private static final Logger log = LoggerFactory - .getLogger(ReducedBoLChunkData.class); - - private long primaryKey = -1; // long representing the primary key for the - // persistence layer! - private TSURL fromSURL; // SURL that the srm command wants to get - private TReturnStatus status; // return status for this chunk of request - - public ReducedBoLChunkData(TSURL fromSURL, TReturnStatus status) - throws InvalidReducedBoLChunkDataAttributesException { - - boolean ok = status != null && fromSURL != null; - if (!ok) { - throw new InvalidReducedBoLChunkDataAttributesException(fromSURL, status); - } - this.fromSURL = fromSURL; - this.status = status; - } - - @Override - public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof ReducedBoLChunkData)) { - return false; - } - ReducedBoLChunkData cd = (ReducedBoLChunkData) o; - return (primaryKey == cd.primaryKey) && fromSURL.equals(cd.fromSURL) - && status.equals(cd.status); - } - - /** - * Method that returns the fromSURL of the srm request to which this chunk - * belongs. 
- */ - public TSURL fromSURL() { - - return fromSURL; - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + new Long(primaryKey).hashCode(); - hash = 37 * hash + fromSURL.hashCode(); - hash = 37 * hash + status.hashCode(); - return hash; - } - - public boolean isPinned() { - - if (status.getStatusCode() == TStatusCode.SRM_SUCCESS) { - return true; - } - return false; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - public long primaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! - */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the status for this chunk of the srm request. - */ - public TReturnStatus status() { - - return status; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("ReducedBoLChunkData\n"); - sb.append("primaryKey="); - sb.append(primaryKey); - sb.append("; "); - sb.append("fromSURL="); - sb.append(fromSURL); - sb.append("; "); - sb.append("status="); - sb.append(status); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkDataTO.java deleted file mode 100644 index 5e6a3502d..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkDataTO.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TStatusCode; - -/** - * Class that represents some of the fields in a row in the Persistence Layer: - * this is all raw data referring to the ReducedBoLChunkData proper, that is - * String and primitive types. - * - * @author EGRID ICTP - * @version 1.0 - * @date November, 2006 - */ -public class ReducedBoLChunkDataTO { - - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private String normalizedStFN = null; - private Integer surlUniqueID = null; - - private int status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - private String errString = " "; - - public String errString() { - - return errString; - } - - public String fromSURL() { - - return fromSURL; - } - - public long primaryKey() { - - return primaryKey; - } - - public void setErrString(String s) { - - errString = s; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public void setStatus(int n) { - - status = n; - } - - public int status() { - - return status; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param surlUniqueID - * the sURLUniqueID to set - */ - public void setSurlUniqueID(Integer surlUniqueID) 
{ - - this.surlUniqueID = surlUniqueID; - } - - /** - * @return the sURLUniqueID - */ - public Integer surlUniqueID() { - - return surlUniqueID; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkData.java b/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkData.java deleted file mode 100644 index a98d06d9d..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkData.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; - -/** - * This class represents a ReducedCopyChunkData, that is part of a multifile - * Copy srm request. It contains data about: the requestToken, the fromSURL, the - * toSURL, return status of the file together with its error string. - * - * @author Michele Dibenedetto - */ -public class ReducedCopyChunkData { - - /* long representing the primary key for the persistence layer! 
*/ - private long primaryKey = -1; - /* SURL from which the srmCopy will get the file */ - private TSURL fromSURL; - /* SURL to which the srmCopy will put the file */ - private TSURL toSURL; - /* Return status for this chunk of request */ - private TReturnStatus status; - - public ReducedCopyChunkData(TSURL fromSURL, TSURL toSURL, TReturnStatus status) - throws InvalidReducedCopyChunkDataAttributesException { - - if (fromSURL == null || toSURL == null || status == null) { - throw new InvalidReducedCopyChunkDataAttributesException(fromSURL, - toSURL, status); - } - - this.fromSURL = fromSURL; - this.toSURL = toSURL; - this.status = status; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - public long primaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! - */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the fromSURL of the srm request to which this chunk - * belongs. - */ - public TSURL fromSURL() { - - return fromSURL; - } - - /** - * Method that returns the toSURL of the srm request to which this chunk - * belongs. - */ - public TSURL toSURL() { - - return toSURL; - } - - /** - * Method that returns the status for this chunk of the srm request. 
- */ - public TReturnStatus status() { - - return status; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("CopyChunkData\n"); - sb.append("primaryKey="); - sb.append(primaryKey); - sb.append("; "); - sb.append("RequestToken="); - sb.append("fromSURL="); - sb.append(fromSURL); - sb.append("; "); - sb.append("toSURL="); - sb.append(toSURL); - sb.append("; "); - sb.append("status="); - sb.append(status); - sb.append("; "); - return sb.toString(); - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + new Long(primaryKey).hashCode(); - hash = 37 * hash + fromSURL.hashCode(); - hash = 37 * hash + toSURL.hashCode(); - hash = 37 * hash + status.hashCode(); - return hash; - } - - @Override - public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof ReducedCopyChunkData)) { - return false; - } - ReducedCopyChunkData cd = (ReducedCopyChunkData) o; - return (primaryKey == cd.primaryKey) && fromSURL.equals(cd.fromSURL) - && toSURL.equals(cd.toSURL) && status.equals(cd.status); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkDataTO.java deleted file mode 100644 index 7ff37389d..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkDataTO.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TStatusCode; - -/** - * Class that represents some of the fields in a row in the Persistence Layer: - * this is all raw data referring to the ReducedCopyChunkData proper, that is - * String and primitive types. - * - * All other fields are 0 if int, or a white space if String. - * - * @author Michele Dibenedetto - */ -public class ReducedCopyChunkDataTO { - - /* Database table request_Get fields BEGIN */ - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private String normalizedSourceStFN = null; - private Integer sourceSurlUniqueID = null; - private String toSURL = " "; - private String normalizedTargetStFN = null; - private Integer targetSurlUniqueID = null; - /* Database table request_Get fields END */ - - private int status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - private String errString = " "; - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public String fromSURL() { - - return fromSURL; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - /** - * @return the normalizedStFN - */ - public String normalizedSourceStFN() { - - return normalizedSourceStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedSourceStFN(String normalizedStFN) { - - this.normalizedSourceStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer sourceSurlUniqueID() { - - return sourceSurlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setSourceSurlUniqueID(Integer surlUniqueID) { - - this.sourceSurlUniqueID = surlUniqueID; - } - - public String toSURL() { - - return toSURL; - } - - public void 
setToSURL(String s) { - - toSURL = s; - } - - /** - * @return the normalizedStFN - */ - public String normalizedTargetStFN() { - - return normalizedTargetStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedTargetStFN(String normalizedStFN) { - - this.normalizedTargetStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer targetSurlUniqueID() { - - return targetSurlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setTargetSurlUniqueID(Integer surlUniqueID) { - - this.targetSurlUniqueID = surlUniqueID; - } - - public int status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedSourceStFN); - sb.append(" "); - sb.append(sourceSurlUniqueID); - sb.append(" "); - sb.append(toSURL); - sb.append(" "); - sb.append(normalizedTargetStFN); - sb.append(" "); - sb.append(targetSurlUniqueID); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkData.java b/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkData.java deleted file mode 100644 index 5446a4257..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkData.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a ReducedPrepareToGetChunkData, that is part of a - * multifile PrepareToGet srm request. It is closely related to PtGChunkData but - * it is called Reduced because it only contains the fromSURL, the current - * TReturnStatus, and the primary key of the request. - * - * This class is intended to be used by srmReleaseFiles, where only a limited - * amunt of information is needed instead of full blown PtGChunkData. - * - * @author EGRID - ICTP Trieste - * @date November, 2006 - * @version 1.0 - */ -public class ReducedPtGChunkData implements ReducedChunkData { - - @SuppressWarnings("unused") - private static final Logger log = LoggerFactory - .getLogger(ReducedPtGChunkData.class); - - private long primaryKey = -1; // long representing the primary key for the - // persistence layer! 
- private TSURL fromSURL; // SURL that the srm command wants to get - private TReturnStatus status; // return status for this chunk of request - - public ReducedPtGChunkData(TSURL fromSURL, TReturnStatus status) - throws InvalidReducedPtGChunkDataAttributesException { - - if (status == null || fromSURL == null) { - throw new InvalidReducedPtGChunkDataAttributesException(fromSURL, status); - } - this.fromSURL = fromSURL; - this.status = status; - } - - /** - * Method that returns the fromSURL of the srm request to which this chunk - * belongs. - */ - public TSURL fromSURL() { - - return fromSURL; - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + new Long(primaryKey).hashCode(); - hash = 37 * hash + fromSURL.hashCode(); - hash = 37 * hash + status.hashCode(); - return hash; - } - - public boolean isPinned() { - - if (status.getStatusCode() == TStatusCode.SRM_FILE_PINNED) { - return true; - } - return false; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - public long primaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! - */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the status for this chunk of the srm request. 
- */ - public TReturnStatus status() { - - return status; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("ReducedPtGChunkData\n"); - sb.append("primaryKey="); - sb.append(primaryKey); - sb.append("; "); - sb.append("fromSURL="); - sb.append(fromSURL); - sb.append("; "); - sb.append("status="); - sb.append(status); - sb.append("."); - return sb.toString(); - } - - @Override - public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof ReducedPtGChunkData)) { - return false; - } - ReducedPtGChunkData cd = (ReducedPtGChunkData) o; - return (primaryKey == cd.primaryKey) && fromSURL.equals(cd.fromSURL) - && status.equals(cd.status); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkDataTO.java deleted file mode 100644 index a73a3b651..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkDataTO.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TStatusCode; - -/** - * Class that represents some of the fileds in a row in the Persistence Layer: - * this is all raw data referring to the ReducedPtGChunkData proper, that is - * String and primitive types. 
- * - * @author EGRID ICTP - * @version 1.0 - * @date November, 2006 - */ -public class ReducedPtGChunkDataTO { - - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private String normalizedStFN = null; - private Integer surlUniqueID = null; - - private int status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - private String errString = " "; - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public String fromSURL() { - - return fromSURL; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param surlUniqueID - * the sURLUniqueID to set - */ - public void setSurlUniqueID(Integer surlUniqueID) { - - this.surlUniqueID = surlUniqueID; - } - - /** - * @return the sURLUniqueID - */ - public Integer surlUniqueID() { - - return surlUniqueID; - } - - public int status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkData.java b/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkData.java deleted file mode 100644 index ac90058c6..000000000 --- 
a/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkData.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a ReducedPrepareToPutChunkData, that is part of a - * multifile PrepareToPut srm request. It is closely related to PtPChunkData but - * it is called Reduced because it only contains the toSURL, the current - * TReturnStatus, the TFileStorageType, the FileLifeTime in case of Volatile, - * the VomsGridUser limited to the DN, and the primary key of the request. - * - * This class is intended to be used by srmPutDone, where only a limited amount - * of information is needed instead of full blown PtPChunkData. It is also used - * by the automatic handlnig of non invoked srmPutDone, during transition to - * SRM_FILE_LIFETIME_EXPIRED. 
- * - * @author EGRID - ICTP Trieste - * @date January, 2007 - * @version 2.0 - */ -public class ReducedPtPChunkData { - - @SuppressWarnings("unused") - private static final Logger log = LoggerFactory - .getLogger(ReducedPtPChunkData.class); - - private long primaryKey = -1; // long representing the primary key for the - // persistence layer! - private TSURL toSURL; // SURL that the srm command wants to get - private TReturnStatus status; // return status for this chunk of request - private TFileStorageType fileStorageType; // fileStorageType of this shunk of - // the request - private TLifeTimeInSeconds fileLifetime; // requested lifetime for SURL in - // case of Volatile entry. - - public ReducedPtPChunkData(TSURL toSURL, TReturnStatus status, - TFileStorageType fileStorageType, TLifeTimeInSeconds fileLifetime) - throws InvalidReducedPtPChunkDataAttributesException { - - if (status == null || toSURL == null || fileStorageType == null - || fileLifetime == null) { - throw new InvalidReducedPtPChunkDataAttributesException(toSURL, status, - fileStorageType, fileLifetime); - } - this.toSURL = toSURL; - this.status = status; - this.fileStorageType = fileStorageType; - this.fileLifetime = fileLifetime; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - public long primaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! - */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the toSURL of the srm request to which this chunk - * belongs. - */ - public TSURL toSURL() { - - return toSURL; - } - - /** - * Method that returns the status for this chunk of the srm request. - */ - public TReturnStatus status() { - - return status; - } - - /** - * Method that returns the TFileStorageType of the srm request to which this - * chunk belongs. 
- */ - public TFileStorageType fileStorageType() { - - return fileStorageType; - } - - /** - * Method that returns the fileLifetime of the srm request to which this chunk - * belongs. - */ - public TLifeTimeInSeconds fileLifetime() { - - return fileLifetime; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("ReducedPtPChunkData\n"); - sb.append("primaryKey="); - sb.append(primaryKey); - sb.append("; "); - sb.append("toSURL="); - sb.append(toSURL); - sb.append("; "); - sb.append("status="); - sb.append(status); - sb.append(";"); - sb.append("fileStorageType="); - sb.append(fileStorageType); - sb.append(";"); - sb.append("fileLifetime="); - sb.append(fileLifetime); - sb.append("."); - return sb.toString(); - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + new Long(primaryKey).hashCode(); - hash = 37 * hash + toSURL.hashCode(); - hash = 37 * hash + status.hashCode(); - hash = 37 * hash + fileStorageType.hashCode(); - hash = 37 * hash + fileLifetime.hashCode(); - return hash; - } - - @Override - public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof ReducedPtPChunkData)) { - return false; - } - ReducedPtPChunkData cd = (ReducedPtPChunkData) o; - return (primaryKey == cd.primaryKey) && toSURL.equals(cd.toSURL) - && status.equals(cd.status) && fileStorageType.equals(cd.fileStorageType) - && fileLifetime.equals(cd.fileLifetime); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkDataTO.java deleted file mode 100644 index e83eb0aa1..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkDataTO.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.srm.types.TFileStorageType; - -/** - * Class that represents some of the fields in a row in the Persistence Layer: - * this is all raw data referring to the ReducedPtPChunkData proper, that is - * String and primitive types. - * - * @author EGRID ICTP - * @version 1.0 - * @date January, 2007 - */ -public class ReducedPtPChunkDataTO { - - private long primaryKey = -1; // ID primary key of record in DB - private String toSURL = " "; - private String normalizedStFN = null; - private Integer surlUniqueID = null; - - private int status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - private String errString = " "; - private String fileStorageType = FileStorageTypeConverter.getInstance().toDB( - TFileStorageType.VOLATILE); - private int fileLifetime = -1; - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public String toSURL() { - - return toSURL; - } - - public void setToSURL(String s) { - - toSURL = s; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param 
surlUniqueID - * the sURLUniqueID to set - */ - public void setSurlUniqueID(Integer surlUniqueID) { - - this.surlUniqueID = surlUniqueID; - } - - /** - * @return the sURLUniqueID - */ - public Integer surlUniqueID() { - - return surlUniqueID; - } - - public int status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String fileStorageType() { - - return fileStorageType; - } - - /** - * Method that sets the FileStorageType: if it is null nothing gets set. The - * deafult value is Volatile. - */ - public void setFileStorageType(String s) { - - if (s != null) - fileStorageType = s; - } - - public int fileLifetime() { - - return fileLifetime; - } - - public void setFileLifetime(int n) { - - fileLifetime = n; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(toSURL); - sb.append(" "); - sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - sb.append(fileStorageType); - sb.append(" "); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/RequestSummaryCatalog.java b/src/main/java/it/grid/storm/catalogs/RequestSummaryCatalog.java index cc81d144e..afd68faf6 100644 --- a/src/main/java/it/grid/storm/catalogs/RequestSummaryCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/RequestSummaryCatalog.java @@ -17,12 +17,32 @@ package it.grid.storm.catalogs; -import it.grid.storm.catalogs.timertasks.RequestsGarbageCollector; +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_GET; +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_PUT; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; + import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.config.Configuration; import it.grid.storm.griduser.FQAN; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; +import it.grid.storm.persistence.converter.PinLifetimeConverter; +import it.grid.storm.persistence.converter.RequestTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.RequestSummaryDAO; +import it.grid.storm.persistence.exceptions.InvalidRequestSummaryDataAttributesException; +import it.grid.storm.persistence.exceptions.MalformedGridUserException; +import it.grid.storm.persistence.impl.mysql.RequestSummaryDAOMySql; +import it.grid.storm.persistence.model.RequestSummaryData; +import it.grid.storm.persistence.model.RequestSummaryDataTO; import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TRequestToken; @@ -31,21 +51,11 @@ import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TStatusCode; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Timer; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.Lists; - /** - * Class that represents the RequestSummaryCatalog of StoRM. The rows in the - * catalog are called RequestSummaryData. Methods are provided to: look up newly - * added requests as identified by their SRM_REQUEST_QUEUED status, to update - * the global status of the request, and to fail a request with SRM_FAILURE. + * Class that represents the RequestSummaryCatalog of StoRM. The rows in the catalog are called + * RequestSummaryData. 
Methods are provided to: look up newly added requests as identified by their + * SRM_REQUEST_QUEUED status, to update the global status of the request, and to fail a request with + * SRM_FAILURE. * * @author EGRID - ICTP Trieste * @version 2.0 @@ -53,389 +63,306 @@ */ public class RequestSummaryCatalog { - private static final Logger log = LoggerFactory - .getLogger(RequestSummaryCatalog.class); - /** Only instance of RequestSummaryCatalog for StoRM! */ - private static RequestSummaryCatalog cat = new RequestSummaryCatalog(); - /** WARNING!!! TO BE MODIFIED WITH FACTORY!!! */ - private final RequestSummaryDAO dao = RequestSummaryDAO.getInstance(); - /** timer thread that will run a task to clean */ - private Timer clock = null; - /** configuration instance **/ - private final Configuration config = Configuration.getInstance(); - - private RequestSummaryCatalog() { - - clock = new Timer(); - - clock.schedule( - new RequestsGarbageCollector(clock, - config.getRequestPurgerPeriod() * 1000), - config.getRequestPurgerDelay() * 1000); - } - - /** - * Method that returns the only instance of RequestSummaryCatalog present in - * StoRM. - */ - public static RequestSummaryCatalog getInstance() { - - return RequestSummaryCatalog.cat; - } - - /** - * Method in charge of retrieving RequestSummaryData associated to new - * requests, that is those found in SRM_REQUETS_QUEUED global status; such - * requests then transit into SRM_SUCCESS. The actual number of fetched - * requests depends on the configured ceiling. - * - * If no new request is found, an empty Collection is returned. if a request - * is malformed, then that request is failed and an attempt is made to signal - * such occurrence in the DB. Only correctly formed requests are returned. 
- */ - synchronized public Collection fetchNewRequests( - int capacity) { - - List list = Lists.newArrayList(); - - Collection c = dao.findNew(capacity); - if (c == null || c.isEmpty()) { - return list; - } - int fetched = c.size(); - log.debug("REQUEST SUMMARY CATALOG: {} new requests picked up.", fetched); - for (RequestSummaryDataTO auxTO : c) { - RequestSummaryData aux = null; - try { - aux = makeOne(auxTO); - } catch (IllegalArgumentException e) { - log.error("REQUEST SUMMARY CATALOG: Failure while performing makeOne " - + "operation. IllegalArgumentException: {}", e.getMessage(), e); - continue; - } - if (aux != null) { - log.debug("REQUEST SUMMARY CATALOG: {} associated to {} included " - + "for processing", aux.requestToken(), aux.gridUser().getDn()); - list.add(aux); - } - } - int ret = list.size(); - if (ret < fetched) { - log.warn("REQUEST SUMMARY CATALOG: including {} requests for processing, " - + "since the dropped ones were malformed!", ret); - } else { - log.debug("REQUEST SUMMARY CATALOG: including for processing all {} " - + "requests.", ret); - } - if (!list.isEmpty()) { - log.debug("REQUEST SUMMARY CATALOG: returning {}\n\n", list); - } - return list; - } - - /** - * Private method used to create a RequestSummaryData object, from a - * RequestSummaryDataTO. If a chunk cannot be created, an error messagge gets - * logged and an attempt is made to signal in the DB that the request is - * malformed. 
- */ - private RequestSummaryData makeOne(RequestSummaryDataTO to) - throws IllegalArgumentException { - - TRequestType auxrtype = RequestTypeConverter.getInstance().toSTORM( - to.requestType()); - if (auxrtype == TRequestType.EMPTY) { - StringBuilder sb = new StringBuilder(); - sb.append("TRequestType could not be created from its String representation "); - sb.append(to.requestType()); - sb.append("\n"); - log.warn(sb.toString()); - throw new IllegalArgumentException( - "Invalid TRequestType in the provided RequestSummaryDataTO"); - } - TRequestToken auxrtoken; - try { - auxrtoken = new TRequestToken(to.requestToken(), to.timestamp()); - } catch (InvalidTRequestTokenAttributesException e) { - log.warn("Unable to create TRequestToken from RequestSummaryDataTO. " - + "InvalidTRequestTokenAttributesException: {}", e.getMessage()); - throw new IllegalArgumentException( - "Unable to create TRequestToken from RequestSummaryDataTO."); - } - GridUserInterface auxgu; - - try { - auxgu = loadVomsGridUser(to.clientDN(), to.vomsAttributes()); - } catch (MalformedGridUserException e) { - StringBuilder sb = new StringBuilder(); - sb.append("VomsGridUser could not be created from DN String "); - sb.append(to.clientDN()); - sb.append(" voms attributes String "); - sb.append(to.vomsAttributes()); - sb.append(" and from request token String "); - sb.append(to.requestToken()); - log.warn("{}. MalformedGridUserException: {}", sb.toString(), e.getMessage()); - throw new IllegalArgumentException( - "Unable to load Voms Grid User from RequestSummaryDataTO. " - + "MalformedGridUserException: " + e.getMessage()); - } - RequestSummaryData data = null; - try { - data = new RequestSummaryData(auxrtype, auxrtoken, auxgu); - data.setPrimaryKey(to.primaryKey()); - } catch (InvalidRequestSummaryDataAttributesException e) { - dao.failRequest(to.primaryKey(), "The request data is malformed!"); - log.warn("REQUEST SUMMARY CATALOG! Unable to create RequestSummaryData. 
" - + "InvalidRequestSummaryDataAttributesException: {}", e.getMessage(), e); - throw new IllegalArgumentException("Unable to reate RequestSummaryData"); - } - TReturnStatus status = null; - if (to.getStatus() != null) { - TStatusCode code = StatusCodeConverter.getInstance().toSTORM(to.getStatus()); - if (code == TStatusCode.EMPTY) { - log.warn("RequestSummaryDataTO retrieved StatusCode was not " - + "recognised: {}", to.getStatus()); - } else { - status = new TReturnStatus(code, to.getErrstring()); - } - } - data.setUserToken(to.getUserToken()); - data.setRetrytime(to.getRetrytime()); - if (to.getPinLifetime() != null) { - data.setPinLifetime(TLifeTimeInSeconds.make(PinLifetimeConverter - .getInstance().toStoRM(to.getPinLifetime()), TimeUnit.SECONDS)); - } - data.setSpaceToken(to.getSpaceToken()); - data.setStatus(status); - data.setErrstring(to.getErrstring()); - data.setRemainingTotalTime(to.getRemainingTotalTime()); - data.setNbreqfiles(to.getNbreqfiles()); - data.setNumOfCompleted(to.getNumOfCompleted()); - if (to.getFileLifetime() != null) { - data.setFileLifetime(TLifeTimeInSeconds.make(to.getFileLifetime(), - TimeUnit.SECONDS)); - } - - data.setDeferredStartTime(to.getDeferredStartTime()); - data.setNumOfWaiting(to.getNumOfWaiting()); - data.setNumOfFailed(to.getNumOfFailed()); - data.setRemainingDeferredStartTime(to.getRemainingDeferredStartTime()); - return data; - } - - /** - * Private method that holds the logic for creating a VomsGridUser from - * persistence and to load any available Proxy. For the moment the VOMS - * attributes present in persistence are NOT loaded! 
- */ - private GridUserInterface loadVomsGridUser(String dn, String fqansString) throws MalformedGridUserException { - - log.debug("load VomsGridUser for dn='{}' and fqansString='{}'", dn, fqansString); - - if (dn == null) { - throw new MalformedGridUserException("Invalid null DN"); - } - if (fqansString == null || fqansString.isEmpty()) { - return GridUserManager.makeGridUser(dn); - } - - FQAN[] fqans = new FQAN[fqansString.split("#").length]; - int i = 0; - for (String fqan: fqansString.split("#")) { - fqans[i++] = new FQAN(fqan); - } - try { - return GridUserManager.makeVOMSGridUser(dn, fqans); - } catch (IllegalArgumentException e) { - log.error("Unexpected error on voms grid user creation. " - + "IllegalArgumentException: {}", e.getMessage(), e); - throw new MalformedGridUserException(e.getMessage()); - } - } - - /** - * Method used to update the global status of a request identified by - * TRequestToken, to the supplied TReturnStatus. In case of any exception - * nothing happens. - */ - synchronized public void updateGlobalStatus(TRequestToken rt, - TReturnStatus status) { - - dao.updateGlobalStatus(rt.toString(), StatusCodeConverter.getInstance() - .toDB(status.getStatusCode()), status.getExplanation()); - } - - public void updateFromPreviousGlobalStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateGlobalStatusOnMatchingGlobalStatus(requestToken, - expectedStatusCode, newStatusCode, explanation); - } - - /** - * Method used to update the global status of a request identified by - * TRequestToken, to the supplied TReturnStatus. The pin lifetime and the file - * lifetime are updated in order to start the countdown from the moment the - * status is updated. In case of any exception nothing happens. 
- */ - synchronized public void updateGlobalStatusPinFileLifetime(TRequestToken rt, - TReturnStatus status) { - - dao.updateGlobalStatusPinFileLifetime(rt.toString(), StatusCodeConverter - .getInstance().toDB(status.getStatusCode()), status.getExplanation()); - } - - /** - * Method used to change the global status of the supplied request to - * SRM_FAILURE, as well as that of each single chunk in the request. If the - * request type is not supported by the logic, only the global status is - * updated and an error log gets written warning of the unsupported business - * logic. - * - * If the supplied RequestSummaryData is null, nothing gets done; if any DB - * error occurs, no exception gets thrown but proper messagges get logged. - */ - synchronized public void failRequest(RequestSummaryData rsd, - String explanation) { - - if (rsd != null) { - TRequestType rtype = rsd.requestType(); - if (rtype == TRequestType.PREPARE_TO_GET) { - dao.failPtGRequest(rsd.primaryKey(), explanation); - } else if (rtype == TRequestType.PREPARE_TO_PUT) { - dao.failPtPRequest(rsd.primaryKey(), explanation); - } else if (rtype == TRequestType.COPY) { - dao.failCopyRequest(rsd.primaryKey(), explanation); - } else { - dao.failRequest(rsd.primaryKey(), explanation); - } - } - } - - /** - * Method used to abort a request that has not yet been fetched for - * processing; if the status of the request associated to the supplied request - * token tok is different from SRM_REQUEST_QUEUED, then nothing takes place; - * likewise if the supplied token does not correspond to any request, or if it - * is null. 
- */ - synchronized public void abortRequest(TRequestToken rt) { - - if (rt != null) { - dao.abortRequest(rt.toString()); - } - } - - /** - * Method used to abort a request that has not yet been fetched for - * processing; abort is only applied to those SURLs of the request specified - * in the Collection; if the status of the request associated to the supplied - * request token is different from SRM_REQUEST_QUEUED, then nothing takes - * place; likewise if the supplied token does not correspond to any request, - * if it is null, if the Collection is null, or the Collection does not - * contain TSURLs. - */ - synchronized public void abortChunksOfRequest(TRequestToken rt, - Collection c) { - - if ((rt != null) && (c != null) && (!c.isEmpty())) { - try { - ArrayList aux = new ArrayList(); - for (TSURL tsurl : c) { - aux.add(tsurl.toString()); - } - dao.abortChunksOfRequest(rt.toString(), aux); - } catch (ClassCastException e) { - log.error("REQUEST SUMMARY CATALOG! Unexpected error in " - + "abortChunksOfRequest: the supplied Collection did not contain " - + "TSURLs! Error: {}", e.getMessage(), e); - } - } - } - - /** - * Method used to abort a request that HAS been fetched for processing; abort - * is only applied to those SURLs of the request specified in the Collection; - * if the status of the request associated to the supplied request token is - * different from SRM_REQUEST_INPROGRESS, then nothing takes place; likewise - * if the supplied token does not correspond to any request, if it is null, if - * the Collection is null, or the Collection does not contain TSURLs. 
- */ - synchronized public void abortChunksOfInProgressRequest(TRequestToken rt, - Collection tsurls) { - - if ((rt != null) && (tsurls != null) && (!tsurls.isEmpty())) { - try { - List aux = new ArrayList(); - for (TSURL tsurl : tsurls) { - aux.add(tsurl.toString()); - } - dao.abortChunksOfInProgressRequest(rt.toString(), aux); - } catch (ClassCastException e) { - log.error("REQUEST SUMMARY CATALOG! Unexpected error in " - + "abortChunksOfInProgressRequest: the supplied Collection did not " - + "contain TSURLs! Error: {}", e.getMessage()); - } - } - } - - synchronized public RequestSummaryData find(TRequestToken requestToken) - throws IllegalArgumentException { - - if (requestToken == null || requestToken.toString().trim().isEmpty()) { - throw new IllegalArgumentException( - "Unable to perform find, illegal arguments: requestToken=" - + requestToken); - } - RequestSummaryDataTO to = dao.find(requestToken.toString()); - if (to != null) { - try { - RequestSummaryData data = makeOne(to); - if (data != null) { - log.debug("REQUEST SUMMARY CATALOG: {} associated to {} retrieved", - data.requestToken(), data.gridUser().getDn()); - return data; - } - } catch (IllegalArgumentException e) { - log.error("REQUEST SUMMARY CATALOG; Failure performing makeOne operation. " - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } else { - log.debug("REQUEST SUMMARY CATALOG: {} token not found", requestToken); - } - return null; - } - - /** - * Method that returns the TRequestType associated to the request with the - * supplied TRequestToken. If no request exists with that token, or the type - * cannot be established from the DB, or the supplied token is null, then an - * EMPTY TRequestType is returned. 
- */ - synchronized public TRequestType typeOf(TRequestToken rt) { - - TRequestType result = TRequestType.EMPTY; - String type = null; - if (rt != null) { - type = dao.typeOf(rt.toString()); - if (type != null && !type.isEmpty()) - result = RequestTypeConverter.getInstance().toSTORM(type); - } - return result; - } - - /** - * Method used to abort a request that HAS been fetched for processing; if the - * status of the request associated to the supplied request token tok is - * different from SRM_REQUEST_INPROGRESS, then nothing takes place; likewise - * if the supplied token does not correspond to any request, or if it is null. - */ - synchronized public void abortInProgressRequest(TRequestToken rt) { - - if (rt != null) { - dao.abortInProgressRequest(rt.toString()); - } - } + private static final Logger log = LoggerFactory.getLogger(RequestSummaryCatalog.class); + + private static RequestSummaryCatalog instance; + private final RequestSummaryDAO dao; + + public static synchronized RequestSummaryCatalog getInstance() { + if (instance == null) { + instance = new RequestSummaryCatalog(); + } + return instance; + } + + private RequestSummaryCatalog() { + dao = RequestSummaryDAOMySql.getInstance(); + } + + /** + * Method in charge of retrieving RequestSummaryData associated to new requests, that is those + * found in SRM_REQUETS_QUEUED global status; such requests then transit into SRM_SUCCESS. The + * actual number of fetched requests depends on the configured ceiling. + * + * If no new request is found, an empty Collection is returned. if a request is malformed, then + * that request is failed and an attempt is made to signal such occurrence in the DB. Only + * correctly formed requests are returned. 
+ */ + synchronized public Collection fetchNewRequests(int capacity) { + + List list = Lists.newArrayList(); + + Collection c = dao.fetchNewRequests(capacity); + if (c == null || c.isEmpty()) { + return list; + } + int fetched = c.size(); + log.debug("REQUEST SUMMARY CATALOG: {} new requests picked up.", fetched); + for (RequestSummaryDataTO auxTO : c) { + RequestSummaryData aux = null; + try { + aux = makeOne(auxTO); + } catch (IllegalArgumentException e) { + log.error("REQUEST SUMMARY CATALOG: Failure while performing makeOne " + + "operation. IllegalArgumentException: {}", e.getMessage(), e); + continue; + } + if (aux != null) { + log.debug("REQUEST SUMMARY CATALOG: {} associated to {} included " + "for processing", + aux.requestToken(), aux.gridUser().getDn()); + list.add(aux); + } + } + int ret = list.size(); + if (ret < fetched) { + log.warn("REQUEST SUMMARY CATALOG: including {} requests for processing, " + + "since the dropped ones were malformed!", ret); + } else { + log.debug("REQUEST SUMMARY CATALOG: including for processing all {} " + "requests.", ret); + } + if (!list.isEmpty()) { + log.debug("REQUEST SUMMARY CATALOG: returning {}\n\n", list); + } + return list; + } + + /** + * Private method used to create a RequestSummaryData object, from a RequestSummaryDataTO. If a + * chunk cannot be created, an error messagge gets logged and an attempt is made to signal in the + * DB that the request is malformed. 
+ */ + private RequestSummaryData makeOne(RequestSummaryDataTO to) throws IllegalArgumentException { + + TRequestType auxrtype = RequestTypeConverter.getInstance().toSTORM(to.requestType()); + if (auxrtype == TRequestType.EMPTY) { + StringBuilder sb = new StringBuilder(); + sb.append("TRequestType could not be created from its String representation "); + sb.append(to.requestType()); + sb.append("\n"); + log.warn(sb.toString()); + throw new IllegalArgumentException( + "Invalid TRequestType in the provided RequestSummaryDataTO"); + } + TRequestToken auxrtoken; + try { + auxrtoken = new TRequestToken(to.requestToken(), to.timestamp()); + } catch (InvalidTRequestTokenAttributesException e) { + log.warn("Unable to create TRequestToken from RequestSummaryDataTO. " + + "InvalidTRequestTokenAttributesException: {}", e.getMessage()); + throw new IllegalArgumentException( + "Unable to create TRequestToken from RequestSummaryDataTO."); + } + GridUserInterface auxgu; + + try { + auxgu = loadVomsGridUser(to.clientDN(), to.vomsAttributes()); + } catch (MalformedGridUserException e) { + StringBuilder sb = new StringBuilder(); + sb.append("VomsGridUser could not be created from DN String "); + sb.append(to.clientDN()); + sb.append(" voms attributes String "); + sb.append(to.vomsAttributes()); + sb.append(" and from request token String "); + sb.append(to.requestToken()); + log.warn("{}. MalformedGridUserException: {}", sb.toString(), e.getMessage()); + throw new IllegalArgumentException("Unable to load Voms Grid User from RequestSummaryDataTO. " + + "MalformedGridUserException: " + e.getMessage()); + } + RequestSummaryData data = null; + try { + data = new RequestSummaryData(auxrtype, auxrtoken, auxgu); + data.setPrimaryKey(to.primaryKey()); + } catch (InvalidRequestSummaryDataAttributesException e) { + dao.failRequest(to.primaryKey(), "The request data is malformed!"); + log.warn("REQUEST SUMMARY CATALOG! Unable to create RequestSummaryData. 
" + + "InvalidRequestSummaryDataAttributesException: {}", e.getMessage(), e); + throw new IllegalArgumentException("Unable to reate RequestSummaryData"); + } + TReturnStatus status = null; + if (to.getStatus() != null) { + TStatusCode code = StatusCodeConverter.getInstance().toSTORM(to.getStatus()); + if (code == TStatusCode.EMPTY) { + log.warn("RequestSummaryDataTO retrieved StatusCode was not " + "recognised: {}", + to.getStatus()); + } else { + status = new TReturnStatus(code, to.getErrstring()); + } + } + data.setUserToken(to.getUserToken()); + data.setRetrytime(to.getRetrytime()); + if (to.getPinLifetime() != null) { + data.setPinLifetime(TLifeTimeInSeconds + .make(PinLifetimeConverter.getInstance().toStoRM(to.getPinLifetime()), TimeUnit.SECONDS)); + } + data.setSpaceToken(to.getSpaceToken()); + data.setStatus(status); + data.setErrstring(to.getErrstring()); + data.setRemainingTotalTime(to.getRemainingTotalTime()); + data.setNbreqfiles(to.getNbreqfiles()); + data.setNumOfCompleted(to.getNumOfCompleted()); + if (to.getFileLifetime() != null) { + data.setFileLifetime(TLifeTimeInSeconds.make(to.getFileLifetime(), TimeUnit.SECONDS)); + } + + data.setDeferredStartTime(to.getDeferredStartTime()); + data.setNumOfWaiting(to.getNumOfWaiting()); + data.setNumOfFailed(to.getNumOfFailed()); + data.setRemainingDeferredStartTime(to.getRemainingDeferredStartTime()); + return data; + } + + /** + * Private method that holds the logic for creating a VomsGridUser from persistence and to load + * any available Proxy. For the moment the VOMS attributes present in persistence are NOT loaded! 
+ */ + private GridUserInterface loadVomsGridUser(String dn, String fqansString) + throws MalformedGridUserException { + + log.debug("load VomsGridUser for dn='{}' and fqansString='{}'", dn, fqansString); + + if (dn == null) { + throw new MalformedGridUserException("Invalid null DN"); + } + if (fqansString == null || fqansString.isEmpty()) { + return GridUserManager.makeGridUser(dn); + } + + FQAN[] fqans = new FQAN[fqansString.split("#").length]; + int i = 0; + for (String fqan : fqansString.split("#")) { + fqans[i++] = new FQAN(fqan); + } + try { + return GridUserManager.makeVOMSGridUser(dn, fqans); + } catch (IllegalArgumentException e) { + log.error("Unexpected error on voms grid user creation. " + "IllegalArgumentException: {}", + e.getMessage(), e); + throw new MalformedGridUserException(e.getMessage()); + } + } + + /** + * Method used to update the global status of a request identified by TRequestToken, to the + * supplied TReturnStatus. In case of any exception nothing happens. + */ + synchronized public void updateGlobalStatus(TRequestToken rt, TReturnStatus status) { + + dao.updateGlobalStatus(rt, status.getStatusCode(), status.getExplanation()); + } + + public void updateFromPreviousGlobalStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + dao.updateGlobalStatusOnMatchingGlobalStatus(requestToken, expectedStatusCode, newStatusCode, + explanation); + } + + /** + * Method used to update the global status of a request identified by TRequestToken, to the + * supplied TReturnStatus. The pin lifetime and the file lifetime are updated in order to start + * the countdown from the moment the status is updated. In case of any exception nothing happens. 
+ */ + synchronized public void updateGlobalStatusPinFileLifetime(TRequestToken rt, + TReturnStatus status) { + + dao.updateGlobalStatusPinFileLifetime(rt, status.getStatusCode(), status.getExplanation()); + } + + /** + * Method used to change the global status of the supplied request to SRM_FAILURE, as well as that + * of each single chunk in the request. If the request type is not supported by the logic, only + * the global status is updated and an error log gets written warning of the unsupported business + * logic. + */ + public synchronized void failRequest(RequestSummaryData rsd, String explanation) { + + Preconditions.checkNotNull(rsd); + TRequestType rtype = rsd.requestType(); + if (PREPARE_TO_GET.equals(rtype)) { + dao.failPtGRequest(rsd.primaryKey(), explanation); + } else if (PREPARE_TO_PUT.equals(rtype)) { + dao.failPtPRequest(rsd.primaryKey(), explanation); + } else { + dao.failRequest(rsd.primaryKey(), explanation); + } + } + + /** + * Method used to abort a request that HAS been fetched for processing; abort is only applied to + * those SURLs of the request specified in the Collection; if the status of the request associated + * to the supplied request token is different from SRM_REQUEST_INPROGRESS, then nothing takes + * place; likewise if the supplied token does not correspond to any request, if it is null, if the + * Collection is null, or the Collection does not contain TSURLs. + */ + synchronized public void abortChunksOfInProgressRequest(TRequestToken rt, + Collection tsurls) { + + if ((rt != null) && (tsurls != null) && (!tsurls.isEmpty())) { + try { + List aux = new ArrayList(); + for (TSURL tsurl : tsurls) { + aux.add(tsurl.toString()); + } + dao.abortChunksOfInProgressRequest(rt, aux); + } catch (ClassCastException e) { + log.error("REQUEST SUMMARY CATALOG! Unexpected error in " + + "abortChunksOfInProgressRequest: the supplied Collection did not " + + "contain TSURLs! 
Error: {}", e.getMessage()); + } + } + } + + synchronized public RequestSummaryData find(TRequestToken requestToken) + throws IllegalArgumentException { + + if (requestToken == null || requestToken.toString().trim().isEmpty()) { + throw new IllegalArgumentException( + "Unable to perform find, illegal arguments: requestToken=" + requestToken); + } + RequestSummaryDataTO to = dao.find(requestToken); + if (to != null) { + try { + RequestSummaryData data = makeOne(to); + if (data != null) { + log.debug("REQUEST SUMMARY CATALOG: {} associated to {} retrieved", data.requestToken(), + data.gridUser().getDn()); + return data; + } + } catch (IllegalArgumentException e) { + log.error("REQUEST SUMMARY CATALOG; Failure performing makeOne operation. " + + "IllegalArgumentException: {}", e.getMessage(), e); + } + } else { + log.debug("REQUEST SUMMARY CATALOG: {} token not found", requestToken); + } + return null; + } + + /** + * Method that returns the TRequestType associated to the request with the supplied TRequestToken. + * If no request exists with that token, or the type cannot be established from the DB, or the + * supplied token is null, then an EMPTY TRequestType is returned. + */ + synchronized public TRequestType typeOf(TRequestToken rt) { + + TRequestType result = TRequestType.EMPTY; + if (rt != null) { + result = dao.getRequestType(rt); + } + return result; + } + + /** + * Method used to abort a request that HAS been fetched for processing; if the status of the + * request associated to the supplied request token tok is different from SRM_REQUEST_INPROGRESS, + * then nothing takes place; likewise if the supplied token does not correspond to any request, or + * if it is null. 
+ */ + synchronized public void abortInProgressRequest(TRequestToken rt) { + + if (rt != null) { + dao.abortInProgressRequest(rt); + } + } } diff --git a/src/main/java/it/grid/storm/catalogs/RequestSummaryDAO.java b/src/main/java/it/grid/storm/catalogs/RequestSummaryDAO.java deleted file mode 100644 index 3b55f7b51..000000000 --- a/src/main/java/it/grid/storm/catalogs/RequestSummaryDAO.java +++ /dev/null @@ -1,1390 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import com.google.common.collect.Lists; - -import it.grid.storm.config.Configuration; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DAO class for RequestSummaryCatalog. This DAO is specifically designed to - * connect to a MySQL DB. 
- * - * @author EGRID ICTP - * @version 3.0 - * @date May 2005 - */ -public class RequestSummaryDAO { - - private static final Logger log = LoggerFactory - .getLogger(RequestSummaryDAO.class); - - /** String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /** String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getStormDbURL(); - /** String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /** String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - /** maximum number of requests that will be retrieved */ - private int limit; - /** Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - - /** milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance() - .getDBReconnectPeriod() * 1000; - /** initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - /** timer thread that will run a task to alert when reconnecting is necessary! */ - private Timer clock = null; - /** - * timer task that will update the boolean signaling that a reconnection is - * needed! - */ - private TimerTask clockTask = null; - /** boolean that tells whether reconnection is needed because of MySQL bug! 
*/ - private boolean reconnect = false; - - private static final RequestSummaryDAO dao = new RequestSummaryDAO(); - - private RequestSummaryDAO() { - - int aux = Configuration.getInstance().getPickingMaxBatchSize(); - if (aux > 1) { - limit = aux; - } else { - limit = 1; - } - setUpConnection(); - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the RequestSummaryDAO. - */ - public static RequestSummaryDAO getInstance() { - - return dao; - } - - /** - * Method that retrieves requests in the SRM_REQUEST_QUEUED status: retrieved - * requests are limited to the number specified by the Configuration method - * getPicker2MaxBatchSize. All retrieved requests get their global status - * transited to SRM_REQUEST_INPROGRESS. A Collection of RequestSummaryDataTO - * is returned: if none are found, an empty collection is returned. - */ - public Collection findNew(int freeSlot) { - - PreparedStatement stmt = null; - ResultSet rs = null; - List list = Lists.newArrayList(); - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - findNew: unable to get a valid connection!"); - return list; - } - // RequestSummaryDataTO - try { - // start transaction - con.setAutoCommit(false); - - int howMuch = -1; - if (freeSlot > limit) { - howMuch = limit; - } else { - howMuch = freeSlot; - } - - String query = "SELECT ID, config_RequestTypeID, r_token, timeStamp, " - + "client_dn, proxy FROM request_queue WHERE status=? 
LIMIT ?"; - - // get id, request type, request token and client_DN of newly added - // requests, which must be in SRM_REQUEST_QUEUED state - stmt = con.prepareStatement(query); - logWarnings(con.getWarnings()); - - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - stmt.setInt(2, howMuch); - - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - - List rowids = new ArrayList(); // arraylist with selected ids - RequestSummaryDataTO aux = null; // RequestSummaryDataTO made from - // retrieved row - long auxid; // primary key of retrieved row - while (rs.next()) { - auxid = rs.getLong("ID"); - rowids.add(Long.valueOf(auxid)); - aux = new RequestSummaryDataTO(); - aux.setPrimaryKey(auxid); - aux.setRequestType(rs.getString("config_RequestTypeID")); - aux.setRequestToken(rs.getString("r_token")); - aux.setClientDN(rs.getString("client_dn")); - aux.setTimestamp(rs.getTimestamp("timeStamp")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. - */ - java.sql.Blob blob = rs.getBlob("proxy"); - if (blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - aux.setVomsAttributes(new String(bdata)); - } - - list.add(aux); - } - close(rs); - close(stmt); - - // transit state from SRM_REQUEST_QUEUED to SRM_REQUEST_INPROGRESS - if (!list.isEmpty()) { - logWarnings(con.getWarnings()); - String where = makeWhereString(rowids); - String update = "UPDATE request_queue SET status=" - + StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_INPROGRESS) + ", errstring=?" 
- + " WHERE ID IN " + where; - stmt = con.prepareStatement(update); - logWarnings(stmt.getWarnings()); - stmt.setString(1, "Request handled!"); - logWarnings(stmt.getWarnings()); - log.trace("REQUEST SUMMARY DAO - findNew: executing {}", stmt); - stmt.executeUpdate(); - close(stmt); - } - - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - findNew: Unable to complete picking. " - + "Error: {}. Rolling back!", e.getMessage(), e); - } finally { - close(rs); - close(stmt); - } - // return collection of requests - if (!list.isEmpty()) { - log.debug("REQUEST SUMMARY DAO - findNew: returning {}", list); - } - return list; - } - - /** - * Method used to signal in the DB that a request failed: the status of the - * request identified by the primary key index is transited to SRM_FAILURE, - * with the supplied explanation String. The supplied index is the primary key - * of the global request. In case of any error, nothing gets done and no - * exception is thrown, but proper error messages get logged. - */ - public void failRequest(long index, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - failRequest: unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE request_queue r " + "SET r.status=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE) - + ", r.errstring=? " + "WHERE r.ID=?"; - PreparedStatement signal = null; - try { - signal = con.prepareStatement(signalSQL); - logWarnings(con.getWarnings()); - signal.setString(1, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(signal.getWarnings()); - signal.setLong(2, index); - logWarnings(signal.getWarnings()); - log.trace("REQUEST SUMMARY DAO! 
failRequest executing: {}", signal); - signal.executeUpdate(); - logWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Unable to transit request identified by " - + "ID {} to SRM_FAILURE! Error: {}", index, e.getMessage(), e); - } finally { - close(signal); - } - } - - /** - * Method used to signal in the DB that a PtGRequest failed. The global status - * transits to SRM_FAILURE, as well as that of each chunk associated to the - * request. The supplied explanation string is used both for the global status - * as well as for each individual chunk. The supplied index is the primary key - * of the global request. In case of any error, nothing gets done and no - * exception is thrown, but proper error messages get logged. - */ - public void failPtGRequest(long index, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - failPtGRequest: unable to get a valid connection!"); - return; - } - String requestSQL = "UPDATE request_queue r " - + "SET r.status=?, r.errstring=? " + "WHERE r.ID=?"; - String chunkSQL = "UPDATE " - + "status_Get s JOIN (request_queue r, request_Get g) ON s.request_GetID=g.ID AND g.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=?"; - PreparedStatement request = null; - PreparedStatement chunk = null; - int failCode = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_FAILURE); - try { - // start transaction - con.setAutoCommit(false); - - // update global status - request = con.prepareStatement(requestSQL); - logWarnings(con.getWarnings()); - request.setInt(1, failCode); - logWarnings(request.getWarnings()); - request.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(request.getWarnings()); - request.setLong(3, index); - logWarnings(request.getWarnings()); - log.trace("REQUEST SUMMARY DAO! 
failPtGRequest executing: {}", request); - request.executeUpdate(); - logWarnings(request.getWarnings()); - - // update each chunk status - chunk = con.prepareStatement(chunkSQL); - logWarnings(con.getWarnings()); - chunk.setInt(1, failCode); - logWarnings(chunk.getWarnings()); - chunk.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(chunk.getWarnings()); - chunk.setLong(3, index); - logWarnings(chunk.getWarnings()); - log.trace("REQUEST SUMMARY DAO! failPtGRequest executing: {}", chunk); - chunk.executeUpdate(); - logWarnings(chunk.getWarnings()); - - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Unable to transit PtG request identified " - + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", index, - e.getMessage(), e); - rollback(con); - } finally { - close(request); - close(chunk); - } - } - - /** - * Method used to signal in the DB that a PtPRequest failed. The global status - * transits to SRM_FAILURE, as well as that of each chunk associated to the - * request. The supplied explanation string is used both for the global status - * as well as for each individual chunk. The supplied index is the primary key - * of the global request. In case of any error, nothing gets done and no - * exception is thrown, but proper error messagges get logged. - */ - public void failPtPRequest(long index, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - failPtPRequest: unable to get a valid connection!"); - return; - } - String requestSQL = "UPDATE request_queue r " - + "SET r.status=?, r.errstring=? " + "WHERE r.ID=?"; - String chunkSQL = "UPDATE " - + "status_Put s JOIN (request_queue r, request_Put p) ON s.request_PutID=p.ID AND p.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? 
" + "WHERE r.ID=?"; - PreparedStatement request = null; - PreparedStatement chunk = null; - int failCode = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_FAILURE); - try { - // start transaction - con.setAutoCommit(false); - - // update global status - request = con.prepareStatement(requestSQL); - logWarnings(con.getWarnings()); - request.setInt(1, failCode); - logWarnings(request.getWarnings()); - request.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(request.getWarnings()); - request.setLong(3, index); - logWarnings(request.getWarnings()); - log.trace("REQUEST SUMMARY DAO! failPtPRequest executing: {}", request); - request.executeUpdate(); - logWarnings(request.getWarnings()); - - // update each chunk status - chunk = con.prepareStatement(chunkSQL); - logWarnings(con.getWarnings()); - chunk.setInt(1, failCode); - logWarnings(chunk.getWarnings()); - chunk.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(chunk.getWarnings()); - chunk.setLong(3, index); - logWarnings(chunk.getWarnings()); - log.trace("REQUEST SUMMARY DAO! failPtPRequest executing: {}", chunk); - chunk.executeUpdate(); - logWarnings(chunk.getWarnings()); - - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Unable to transit PtP request identified " - + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", index, - e.getMessage(), e); - rollback(con); - } finally { - close(request); - close(chunk); - } - } - - /** - * Method used to signal in the DB that a CopyRequest failed. The global - * status transits to SRM_FAILURE, as well as that of each chunk associated to - * the request. The supplied explanation string is used both for the global - * status as well as for each individual chunk. 
The supplied index is the - * primary key of the global request. In case of any error, nothing gets done - * and no exception is thrown, but proper error messagges get logged. - */ - public void failCopyRequest(long index, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - failCopyRequest: unable to get a valid connection!"); - return; - } - String requestSQL = "UPDATE request_queue r " - + "SET r.status=?, r.errstring=? " + "WHERE r.ID=?"; - String chunkSQL = "UPDATE " - + "status_Copy s JOIN (request_queue r, request_Copy c) ON s.request_CopyID=c.ID AND c.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=?"; - PreparedStatement request = null; - PreparedStatement chunk = null; - int failCode = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_FAILURE); - try { - // start transaction - con.setAutoCommit(false); - - // update global status - request = con.prepareStatement(requestSQL); - logWarnings(con.getWarnings()); - request.setInt(1, failCode); - logWarnings(request.getWarnings()); - request.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(request.getWarnings()); - request.setLong(3, index); - logWarnings(request.getWarnings()); - log.trace("REQUEST SUMMARY DAO! failCopyRequest executing: {}", request); - request.executeUpdate(); - logWarnings(request.getWarnings()); - - // update each chunk status - chunk = con.prepareStatement(chunkSQL); - logWarnings(con.getWarnings()); - chunk.setInt(1, failCode); - logWarnings(chunk.getWarnings()); - chunk.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(chunk.getWarnings()); - chunk.setLong(3, index); - logWarnings(chunk.getWarnings()); - log.trace("REQUEST SUMMARY DAO! 
failCopyRequest executing: {}", chunk); - chunk.executeUpdate(); - logWarnings(chunk.getWarnings()); - - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Unable to transit Copy request identified " - + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", index, - e.getMessage(), e); - rollback(con); - } finally { - close(request); - close(chunk); - } - } - - /** - * Method used to update the global status of the request identified by the - * RequestToken rt. It gets updated the supplied status, with the supplied - * explanation String. If the supplied request token does not exist, nothing - * happens. - */ - public void updateGlobalStatus(String rt, int status, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - updateGlobalStatus: unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - try { - update = con - .prepareStatement("UPDATE request_queue SET status=?, errstring=? 
WHERE r_token=?"); - logWarnings(con.getWarnings()); - update.setInt(1, status); - logWarnings(update.getWarnings()); - update.setString(2, explanation); - logWarnings(update.getWarnings()); - update.setString(3, rt); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - updateGlobalStatus: executing {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); - } finally { - close(update); - } - } - - public void updateGlobalStatusOnMatchingGlobalStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - updateGlobalStatusOnMatchingGlobalStatus: " - + "unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - try { - update = con - .prepareStatement("UPDATE request_queue SET status=?, errstring=? WHERE r_token=? AND status=?"); - logWarnings(con.getWarnings()); - update.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode)); - logWarnings(update.getWarnings()); - update.setString(2, explanation); - logWarnings(update.getWarnings()); - update.setString(3, requestToken.toString()); - logWarnings(update.getWarnings()); - update.setInt(4, - StatusCodeConverter.getInstance().toDB(expectedStatusCode)); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - updateGlobalStatusOnMatchingGlobalStatus: " - + "executing {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); - } finally { - close(update); - } - } - - /** - * Method used to update the global status of the request identified by the - * RequestToken rt. 
It gets updated the supplied status, with the supplied - * explanation String and pin and file lifetimes are updated in order to start - * the countdown from now. If the supplied request token does not exist, - * nothing happens. - */ - public void updateGlobalStatusPinFileLifetime(String rt, int status, - String explanation) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - updateGlobalStatusPinFileLifetime: " - + "unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - - String query = "UPDATE request_queue SET status=?, errstring=?, " - + "pinLifetime=pinLifetime+(UNIX_TIMESTAMP()-UNIX_TIMESTAMP(timeStamp)) " - + "WHERE r_token=?"; - - try { - update = con.prepareStatement(query); - logWarnings(con.getWarnings()); - - update.setInt(1, status); - logWarnings(update.getWarnings()); - - update.setString(2, explanation); - logWarnings(update.getWarnings()); - - update.setString(3, rt); - logWarnings(update.getWarnings()); - - log.trace("REQUEST SUMMARY DAO - updateGlobalStatus: executing {}", update); - - update.executeUpdate(); - logWarnings(update.getWarnings()); - - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); - } finally { - close(update); - } - } - - /** - * Method used to transit the status of a request that is in - * SRM_REQUEST_QUEUED state, to SRM_ABORTED. All files associated with the - * request will also get their status changed to SRM_ABORTED. If the supplied - * token is null, or not found, or not in the SRM_REQUEST_QUEUED state, then - * nothing happens. - */ - public void abortRequest(String rt) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - abortRequest: unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - PreparedStatement query = null; - ResultSet rs = null; - try { - query = con - .prepareStatement("SELECT ID,config_RequestTypeID FROM request_queue WHERE r_token=? 
AND status=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - query.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortRequest - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - long id = rs.getLong("ID"); - String type = rs.getString("config_RequestTypeID"); - update = con - .prepareStatement("UPDATE request_queue SET status=?, errstring=? WHERE ID=?"); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - close(update); - // update single chunk file statuses - TRequestType rtyp = RequestTypeConverter.getInstance().toSTORM(type); - String statusTable = null; - String requestTable = null; - String joinColumn = null; - if (rtyp != TRequestType.EMPTY) { - if (rtyp == TRequestType.PREPARE_TO_GET) { - statusTable = "status_Get"; - requestTable = "request_Get"; - joinColumn = "request_GetID"; - } else if (rtyp == TRequestType.PREPARE_TO_PUT) { - requestTable = "request_Put"; - statusTable = "status_Put"; - joinColumn = "request_PutID"; - } else if (rtyp == TRequestType.COPY) { - requestTable = "request_Copy"; - statusTable = "status_Copy"; - joinColumn = "request_CopyID"; - } else { - requestTable = "request_BoL"; - statusTable = "status_BoL"; - joinColumn = "request_BoLID"; - } - String auxstr = "UPDATE " + statusTable - + " s JOIN (request_queue r, " + requestTable + " t) ON (s." - + joinColumn + "=t.ID AND t.request_queueID=r.ID) " - + "SET s.statusCode=?, s.explanation=? 
" + "WHERE r.ID=?"; - update = con.prepareStatement(auxstr); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } else { - log.error("REQUEST SUMMARY DAO - Unable to complete abortRequest: " - + "could not update file statuses because the request type could " - + "not be translated from the DB!"); - } - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - abortRequest: {}", e.getMessage(), e); - } finally { - close(rs); - close(query); - close(update); - } - } - - /** - * Method used to transit the status of a request that is in - * SRM_REQUEST_INPROGRESS state, to SRM_ABORTED. All files associated with the - * request will also get their status changed to SRM_ABORTED. If the supplied - * token is null, or not found, or not in the SRM_REQUEST_INPROGRESS state, - * then nothing happens. - */ - public void abortInProgressRequest(String rt) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - abortInProgressRequest: unable to get " - + "a valid connection!"); - return; - } - PreparedStatement update = null; - PreparedStatement query = null; - ResultSet rs = null; - try { - query = con - .prepareStatement("SELECT ID,config_RequestTypeID FROM request_queue WHERE r_token=? 
AND status=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - query.setInt(2, StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_INPROGRESS)); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - // token found... - // get ID - long id = rs.getLong("ID"); - String type = rs.getString("config_RequestTypeID"); - // update global request status - update = con - .prepareStatement("UPDATE request_queue SET status=?, errstring=? WHERE ID=?"); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - close(update); - // update single chunk file statuses - TRequestType rtyp = RequestTypeConverter.getInstance().toSTORM(type); - String statusTable = null; - String requestTable = null; - String joinColumn = null; - if (rtyp != TRequestType.EMPTY) { - if (rtyp == TRequestType.PREPARE_TO_GET) { - requestTable = "request_Get"; - statusTable = "status_Get"; - joinColumn = "request_GetID"; - } else if (rtyp == TRequestType.PREPARE_TO_PUT) { - requestTable = "request_Put"; - statusTable = "status_Put"; - joinColumn = "request_PutID"; - } else if (rtyp == TRequestType.COPY) { - requestTable = "request_Copy"; - statusTable = "status_Copy"; - joinColumn = "request_CopyID"; - } else { - requestTable = "request_BoL"; - statusTable = "status_BoL"; - joinColumn = "request_BoLID"; - } - String auxstr = "UPDATE " + statusTable - + " s JOIN (request_queue r, " + requestTable + " t ON s." 
- + joinColumn + "=t.ID AND t.request_queueID=r.ID )" - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=?"; - update = con.prepareStatement(auxstr); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } else { - log.error("REQUEST SUMMARY DAO - Unable to complete " - + "abortInProgressRequest: could not update file statuses because " - + "the request type could not be translated from the DB!"); - } - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - abortInProgressRequest: {}", - e.getMessage(), e); - } finally { - close(rs); - close(query); - close(update); - } - } - - /** - * Method used to transit the status of chunks of a request that is in - * SRM_REQUEST_QUEUED state, to SRM_ABORTED. If the supplied token is null, or - * not found, or not in the SRM_REQUEST_QUEUED state, then nothing happens. - */ - public void abortChunksOfRequest(String rt, Collection surls) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - abortChunksOfRequest: unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - PreparedStatement query = null; - ResultSet rs = null; - try { - query = con - .prepareStatement("SELECT ID,config_RequestTypeID FROM request_queue WHERE r_token=? 
AND status=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - query.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortChunksOfRequest - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - long id = rs.getLong("ID"); - String type = rs.getString("config_RequestTypeID"); - // update single chunk file statuses - TRequestType rtyp = RequestTypeConverter.getInstance().toSTORM(type); - String statusTable = null; - String requestTable = null; - String joinColumn = null; - String surlColumn = null; - if (rtyp != TRequestType.EMPTY) { - if (rtyp == TRequestType.PREPARE_TO_GET) { - requestTable = "request_Get"; - statusTable = "status_Get"; - joinColumn = "request_GetID"; - surlColumn = "sourceSURL"; - } else if (rtyp == TRequestType.PREPARE_TO_PUT) { - requestTable = "request_Put"; - statusTable = "status_Put"; - joinColumn = "request_PutID"; - surlColumn = "targetSURL"; - } else if (rtyp == TRequestType.COPY) { - requestTable = "request_Copy"; - statusTable = "status_Copy"; - joinColumn = "request_CopyID"; - surlColumn = "targetSURL"; - } else { - requestTable = "request_BoL"; - statusTable = "status_BoL"; - joinColumn = "request_BoLID"; - surlColumn = "sourceSURL"; - } - String auxstr = "UPDATE " + statusTable - + " s JOIN (request_queue r, " + requestTable + " t ON s." - + joinColumn + "=t.ID AND t.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=? 
AND " - + surlColumn + " IN " + makeInString(surls); - update = con.prepareStatement(auxstr); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortChunksOfRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } else { - log.error("REQUEST SUMMARY DAO - Unable to complete " - + "abortChunksOfRequest: could not update file statuses because " - + "the request type could not be translated from the DB!"); - } - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - abortChunksOfRequest: {}", - e.getMessage(), e); - } finally { - close(rs); - close(query); - close(update); - } - } - - /** - * Method used to transit the status of chunks of a request that is in - * SRM_REQUEST_INPROGRESS state, to SRM_ABORTED. If the supplied token is - * null, or not found, or not in the SRM_REQUEST_INPROGRESS state, then - * nothing happens. - */ - public void abortChunksOfInProgressRequest(String rt, Collection surls) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest: unable " - + "to get a valid connection!"); - return; - } - PreparedStatement update = null; - PreparedStatement query = null; - ResultSet rs = null; - try { - query = con - .prepareStatement("SELECT ID,config_RequestTypeID FROM request_queue WHERE r_token=? 
AND status=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - query.setInt(2, StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_INPROGRESS)); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - long id = rs.getLong("ID"); - String type = rs.getString("config_RequestTypeID"); - // update single chunk file statuses - TRequestType rtyp = RequestTypeConverter.getInstance().toSTORM(type); - String statusTable = null; - String requestTable = null; - String joinColumn = null; - String surlColumn = null; - if (rtyp != TRequestType.EMPTY) { - if (rtyp == TRequestType.PREPARE_TO_GET) { - requestTable = "request_Get"; - statusTable = "status_Get"; - joinColumn = "request_GetID"; - surlColumn = "sourceSURL"; - } else if (rtyp == TRequestType.PREPARE_TO_PUT) { - requestTable = "request_Put"; - statusTable = "status_Put"; - joinColumn = "request_PutID"; - surlColumn = "targetSURL"; - } else if (rtyp == TRequestType.COPY) { - requestTable = "request_Copy"; - statusTable = "status_Copy"; - joinColumn = "request_CopyID"; - surlColumn = "targetSURL"; - } else { - requestTable = "request_BoL"; - statusTable = "status_BoL"; - joinColumn = "request_BoLID"; - surlColumn = "sourceSURL"; - } - String auxstr = "UPDATE " + statusTable - + " s JOIN (request_queue r, " + requestTable + " t ON s." - + joinColumn + "=t.ID AND t.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=? 
AND " - + surlColumn + " IN " + makeInString(surls); - update = con.prepareStatement(auxstr); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest " - + "- {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } else { - log.error("REQUEST SUMMARY DAO - Unable to complete " - + "abortChunksOfInProgressRequest: could not update file statuses " - + "because the request type could not be translated from the DB!"); - } - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest: {}", - e.getMessage(), e); - } finally { - close(rs); - close(query); - close(update); - } - } - - /** - * Private method that returns a String of all SURLS in the collection of - * String. - */ - private String makeInString(Collection c) { - - StringBuilder sb = new StringBuilder("("); - for (Iterator i = c.iterator(); i.hasNext();) { - sb.append(i.next()); - if (i.hasNext()) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns the config_RequestTypeID field present in request_queue - * table, for the request with the specified request token rt. In case of any - * error, the empty String "" is returned. 
- */ - public String typeOf(String rt) { - - PreparedStatement query = null; - ResultSet rs = null; - String result = ""; - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - typeOf: unable to get a valid connection!"); - return result; - } - try { - query = con - .prepareStatement("SELECT config_RequestTypeID from request_queue WHERE r_token=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - typeOf - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - result = rs.getString("config_RequestTypeID"); - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - typeOf - {}", e.getMessage(), e); - } finally { - close(rs); - close(query); - } - return result; - } - - /** - * Method that returns the config_RequestTypeID field present in request_queue - * table, for the request with the specified request token rt. In case of any - * error, the empty String "" is returned. 
- */ - public RequestSummaryDataTO find(String rt) { - - PreparedStatement query = null; - ResultSet rs = null; - RequestSummaryDataTO to = null; - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - find: unable to get a valid connection!"); - return null; - } - try { - query = con - .prepareStatement("SELECT * from request_queue WHERE r_token=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - con.setAutoCommit(false); - - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (!rs.first()) { - log.debug("No requests found with token {}", rt); - return null; - } - to = new RequestSummaryDataTO(); - to.setPrimaryKey(rs.getLong("ID")); - to.setRequestType(rs.getString("config_RequestTypeID")); - to.setClientDN(rs.getString("client_dn")); - to.setUserToken(rs.getString("u_token")); - to.setRetrytime(rs.getInt("retrytime")); - to.setPinLifetime(rs.getInt("pinLifetime")); - to.setSpaceToken(rs.getString("s_token")); - to.setStatus(rs.getInt("status")); - to.setErrstring(rs.getString("errstring")); - to.setRequestToken(rs.getString("r_token")); - to.setRemainingTotalTime(rs.getInt("remainingTotalTime")); - to.setFileLifetime(rs.getInt("fileLifetime")); - to.setNbreqfiles(rs.getInt("nbreqfiles")); - to.setNumOfCompleted(rs.getInt("numOfCompleted")); - to.setNumOfWaiting(rs.getInt("numOfWaiting")); - to.setNumOfFailed(rs.getInt("numOfFailed")); - to.setTimestamp(rs.getTimestamp("timeStamp")); - - - java.sql.Blob blob = rs.getBlob("proxy"); - if (blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - to.setVomsAttributes(new String(bdata)); - } - to.setDeferredStartTime(rs.getInt("deferredStartTime")); - to.setRemainingDeferredStartTime(rs.getInt("remainingDeferredStartTime")); - - if (rs.next()) { - log.warn("More than a row matches token {}", rt); - } - close(rs); - close(query); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - find - {}", e.getMessage(), e); - } finally { - close(rs); - 
close(query); - } - return to; - } - - /** - * Method that purges expired requests: it only removes up to a fixed value of - * expired requests at a time. The value is configured and obtained from the - * configuration property getPurgeBatchSize. A List of Strings with the - * request tokens removed is returned. In order to completely remove all - * expired requests, simply keep invoking this method until an empty List is - * returned. This batch processing is needed because there could be millions - * of expired requests which are likely to result in out-of-memory problems. - * Notice that in case of errors only error messages get logged. An empty List - * is also returned. - */ - public List purgeExpiredRequests(long expiredRequestTime, int purgeSize) { - - PreparedStatement ps = null; - ResultSet rs = null; - List requestTokens = Lists.newArrayList(); - List ids = Lists.newArrayList(); - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - purgeExpiredRequests: unable to get a " - + "valid connection!"); - return requestTokens; - } - - try { - // start transaction - con.setAutoCommit(false); - String stmt = "SELECT ID, r_token FROM request_queue WHERE UNIX_TIMESTAMP(NOW()) - UNIX_TIMESTAMP(timeStamp) > ? AND status <> ? AND status <> ? LIMIT ?"; - ps = con.prepareStatement(stmt); - ps.setLong(1, expiredRequestTime); - ps.setInt(2, StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - ps.setInt(3, StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_INPROGRESS)); - ps.setInt(4, purgeSize); - logWarnings(con.getWarnings()); - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", ps); - - rs = ps.executeQuery(); - logWarnings(ps.getWarnings()); - - while (rs.next()) { - requestTokens.add(rs.getString("r_token")); - ids.add(new Long(rs.getLong("ID"))); - } - - close(rs); - close(ps); - - if (!ids.isEmpty()) { - // REMOVE BATCH OF EXPIRED REQUESTS! 
- stmt = "DELETE FROM request_queue WHERE ID in " + makeWhereString(ids); - - ps = con.prepareStatement(stmt); - logWarnings(con.getWarnings()); - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", stmt); - - int deleted = ps.executeUpdate(); - logWarnings(ps.getWarnings()); - if (deleted > 0) { - log.info("REQUEST SUMMARY DAO - purgeExpiredRequests - Deleted {} " - + "expired requests.", deleted); - } else { - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - No deleted " - + "expired requests."); - } - - close(ps); - - stmt = "DELETE request_DirOption FROM request_DirOption " - + " LEFT JOIN request_Get ON request_DirOption.ID = request_Get.request_DirOptionID" - + " LEFT JOIN request_BoL ON request_DirOption.ID = request_BoL.request_DirOptionID " - + " LEFT JOIN request_Copy ON request_DirOption.ID = request_Copy.request_DirOptionID" - + " WHERE request_Copy.request_DirOptionID IS NULL AND" - + " request_Get.request_DirOptionID IS NULL AND" - + " request_BoL.request_DirOptionID IS NULL;"; - - ps = con.prepareStatement(stmt); - logWarnings(con.getWarnings()); - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", stmt); - deleted = ps.executeUpdate(); - logWarnings(ps.getWarnings()); - - if (deleted > 0) { - log.info("REQUEST SUMMARY DAO - purgeExpiredRequests - Deleted {} " - + "DirOption related to expired requests.", deleted); - } else { - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - No Deleted " - + "DirOption related to expired requests."); - } - close(ps); - - } - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - purgeExpiredRequests - Rolling back " - + "because of error: {}", e.getMessage(), e); - rollback(con); - } finally { - close(rs); - close(ps); - } - return requestTokens; - } - - /** - * Retrieve the total number of expired requests. 
- * - * @return - */ - public int getNumberExpired() { - - int rowCount = 0; - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - getNumberExpired: unable to get a " - + "valid connection!"); - return 0; - } - - PreparedStatement ps = null; - ResultSet rs = null; - - try { - // start transaction - con.setAutoCommit(false); - - String stmt = "SELECT count(*) FROM request_queue WHERE UNIX_TIMESTAMP(NOW()) - UNIX_TIMESTAMP(timeStamp) > ? AND status <> ? AND status <> ? "; - ps = con.prepareStatement(stmt); - ps.setLong(1, Configuration.getInstance().getExpiredRequestTime()); - ps.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - ps.setInt(3, StatusCodeConverter.getInstance() - .toDB(TStatusCode.SRM_REQUEST_INPROGRESS)); - - logWarnings(con.getWarnings()); - log.trace("REQUEST SUMMARY DAO - Number of expired requests: {}", ps); - rs = ps.executeQuery(); - logWarnings(ps.getWarnings()); - - // Get the number of rows from the result set - rs.next(); - rowCount = rs.getInt(1); - log.debug("Nr of expired requests is: {}", rowCount); - - close(rs); - close(ps); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - purgeExpiredRequests - Rolling back " - + "because of error: {}", e.getMessage(), e); - rollback(con); - } finally { - close(rs); - close(ps); - } - - return rowCount; - - } - - /** - * Private method that returns a String of all IDs retrieved by the last - * SELECT. - */ - private String makeWhereString(List rowids) { - - StringBuilder sb = new StringBuilder("("); - for (Iterator i = rowids.iterator(); i.hasNext();) { - sb.append(i.next()); - if (i.hasNext()) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Auxiliary method that sets up the connection to the DB, as well as the - * prepared statement. 
- */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - logWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (ClassNotFoundException | SQLException e) { - log.error("REQUEST SUMMARY DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that tales down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Exception in takeDownConnection " - + "method: {}", e.getMessage(), e); - } - } - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("REQUEST SUMMARY DAO! Unable to close Statement {} - " - + "Error: {}", stmt.toString(), e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("REQUEST SUMMARY DAO! Unable to close ResultSet! Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to roll back a transaction - */ - private void rollback(Connection con) { - - if (con != null) { - try { - con.rollback(); - logWarnings(con.getWarnings()); - log.error("PICKER2: roll back successful!"); - } catch (SQLException e2) { - log.error("PICKER2: roll back failed! 
{}", e2.getMessage(), e2); - } - } - } - - /** - * Private auxiliary method used to log SQLWarnings. - */ - private void logWarnings(SQLWarning warning) { - - if (warning != null) { - log.debug("REQUEST SUMMARY DAO: {}", warning); - while ((warning = warning.getNextWarning()) != null) { - log.debug("REQUEST SUMMARY DAO: {}", warning); - } - } - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/RequestSummaryData.java b/src/main/java/it/grid/storm/catalogs/RequestSummaryData.java deleted file mode 100644 index 4921fe128..000000000 --- a/src/main/java/it/grid/storm/catalogs/RequestSummaryData.java +++ /dev/null @@ -1,540 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; -import it.grid.storm.srm.types.TReturnStatus; -// import it.grid.storm.griduser.VomsGridUser; -import it.grid.storm.griduser.GridUserInterface; - -/** - * This class represents the SummaryData associated with the SRM request. It - * contains info about: Primary Key of request, TRequestType, TRequestToken, - * VomsGridUser. 
- * - * @author EGRID - ICTP Trieste - * @date March 18th, 2005 - * @version 4.0 - */ -public class RequestSummaryData { - - private TRequestType requestType = null; // request type of SRM request - private TRequestToken requestToken = null; // TRequestToken of SRM request - private GridUserInterface gu = null; // VomsGridUser that issued This request - private long id = -1; // long representing This object in persistence - - private String userToken = null; - private Integer retrytime = null; - private TLifeTimeInSeconds pinLifetime = null; - private String spaceToken = null; - private TReturnStatus status = null; - private String errstring = null; - private Integer remainingTotalTime = null; - private Integer nbreqfiles = null; - private Integer numOfCompleted = null; - private TLifeTimeInSeconds fileLifetime = null; - private Integer deferredStartTime = null; - private Integer numOfWaiting = null; - private Integer numOfFailed = null; - private Integer remainingDeferredStartTime = null; - - public RequestSummaryData(TRequestType rtype, TRequestToken rtoken, - GridUserInterface gu) throws InvalidRequestSummaryDataAttributesException { - - boolean ok = rtype != null && rtoken != null && gu != null; - if (!ok) - throw new InvalidRequestSummaryDataAttributesException(rtype, rtoken, gu); - this.requestType = rtype; - this.requestToken = rtoken; - this.gu = gu; - } - - /** - * Method that returns the type of SRM request - */ - public TRequestType requestType() { - - return requestType; - } - - /** - * Method that returns the SRM request TRequestToken - */ - public TRequestToken requestToken() { - - return requestToken; - } - - /** - * Method that returns the VomsGridUser that issued this request - */ - public GridUserInterface gridUser() { - - return gu; - } - - /** - * Method that returns a long corresponding to the identifier of This object - * in persistence. 
- */ - public long primaryKey() { - - return id; - } - - /** - * Method used to set the log corresponding to the identifier of This object - * in persistence. - */ - public void setPrimaryKey(long l) { - - this.id = l; - } - - /** - * @return the userToken - */ - public String getUserToken() { - - return userToken; - } - - /** - * @return the retrytime - */ - public Integer getRetrytime() { - - return retrytime; - } - - /** - * @return the pinLifetime - */ - public TLifeTimeInSeconds getPinLifetime() { - - return pinLifetime; - } - - /** - * @return the spaceToken - */ - public String getSpaceToken() { - - return spaceToken; - } - - /** - * @return the status - */ - public TReturnStatus getStatus() { - - return status; - } - - /** - * @return the errstring - */ - public String getErrstring() { - - return errstring; - } - - /** - * @return the remainingTotalTime - */ - public Integer getRemainingTotalTime() { - - return remainingTotalTime; - } - - /** - * @return the nbreqfiles - */ - public Integer getNbreqfiles() { - - return nbreqfiles; - } - - /** - * @return the numOfCompleted - */ - public Integer getNumOfCompleted() { - - return numOfCompleted; - } - - /** - * @return the fileLifetime - */ - public TLifeTimeInSeconds getFileLifetime() { - - return fileLifetime; - } - - /** - * @return the deferredStartTime - */ - public Integer getDeferredStartTime() { - - return deferredStartTime; - } - - /** - * @return the numOfWaiting - */ - public Integer getNumOfWaiting() { - - return numOfWaiting; - } - - /** - * @return the numOfFailed - */ - public Integer getNumOfFailed() { - - return numOfFailed; - } - - /** - * @return the remainingDeferredStartTime - */ - public Integer getRemainingDeferredStartTime() { - - return remainingDeferredStartTime; - } - - public void setUserToken(String userToken) { - - this.userToken = userToken; - } - - public void setRetrytime(Integer retrytime) { - - this.retrytime = retrytime; - - } - - public void 
setPinLifetime(TLifeTimeInSeconds pinLifetime) { - - this.pinLifetime = pinLifetime; - - } - - public void setSpaceToken(String spaceToken) { - - this.spaceToken = spaceToken; - - } - - public void setStatus(TReturnStatus status) { - - this.status = status; - - } - - public void setErrstring(String errstring) { - - this.errstring = errstring; - - } - - public void setRemainingTotalTime(Integer remainingTotalTime) { - - this.remainingTotalTime = remainingTotalTime; - - } - - public void setNbreqfiles(Integer nbreqfiles) { - - this.nbreqfiles = nbreqfiles; - - } - - public void setNumOfCompleted(Integer numOfCompleted) { - - this.numOfCompleted = numOfCompleted; - - } - - public void setFileLifetime(TLifeTimeInSeconds fileLifetime) { - - this.fileLifetime = fileLifetime; - - } - - public void setDeferredStartTime(Integer deferredStartTime) { - - this.deferredStartTime = deferredStartTime; - - } - - public void setNumOfWaiting(Integer numOfWaiting) { - - this.numOfWaiting = numOfWaiting; - - } - - public void setNumOfFailed(Integer numOfFailed) { - - this.numOfFailed = numOfFailed; - - } - - public void setRemainingDeferredStartTime(Integer remainingDeferredStartTime) { - - this.remainingDeferredStartTime = remainingDeferredStartTime; - - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("RequestSummaryData [requestType="); - builder.append(requestType); - builder.append(", requestToken="); - builder.append(requestToken); - builder.append(", gu="); - builder.append(gu); - builder.append(", id="); - builder.append(id); - builder.append(", userToken="); - builder.append(userToken); - builder.append(", retrytime="); - builder.append(retrytime); - builder.append(", pinLifetime="); - builder.append(pinLifetime); - builder.append(", spaceToken="); - builder.append(spaceToken); - builder.append(", status="); - builder.append(status); - 
builder.append(", errstring="); - builder.append(errstring); - builder.append(", remainingTotalTime="); - builder.append(remainingTotalTime); - builder.append(", nbreqfiles="); - builder.append(nbreqfiles); - builder.append(", numOfCompleted="); - builder.append(numOfCompleted); - builder.append(", fileLifetime="); - builder.append(fileLifetime); - builder.append(", deferredStartTime="); - builder.append(deferredStartTime); - builder.append(", numOfWaiting="); - builder.append(numOfWaiting); - builder.append(", numOfFailed="); - builder.append(numOfFailed); - builder.append(", remainingDeferredStartTime="); - builder.append(remainingDeferredStartTime); - builder.append("]"); - return builder.toString(); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = 1; - result = prime * result - + ((deferredStartTime == null) ? 0 : deferredStartTime.hashCode()); - result = prime * result + ((errstring == null) ? 0 : errstring.hashCode()); - result = prime * result - + ((fileLifetime == null) ? 0 : fileLifetime.hashCode()); - result = prime * result + ((gu == null) ? 0 : gu.hashCode()); - result = prime * result + (int) (id ^ (id >>> 32)); - result = prime * result - + ((nbreqfiles == null) ? 0 : nbreqfiles.hashCode()); - result = prime * result - + ((numOfCompleted == null) ? 0 : numOfCompleted.hashCode()); - result = prime * result - + ((numOfFailed == null) ? 0 : numOfFailed.hashCode()); - result = prime * result - + ((numOfWaiting == null) ? 0 : numOfWaiting.hashCode()); - result = prime * result - + ((pinLifetime == null) ? 0 : pinLifetime.hashCode()); - result = prime - * result - + ((remainingDeferredStartTime == null) ? 0 : remainingDeferredStartTime - .hashCode()); - result = prime * result - + ((remainingTotalTime == null) ? 0 : remainingTotalTime.hashCode()); - result = prime * result - + ((requestToken == null) ? 
0 : requestToken.hashCode()); - result = prime * result - + ((requestType == null) ? 0 : requestType.hashCode()); - result = prime * result + ((retrytime == null) ? 0 : retrytime.hashCode()); - result = prime * result - + ((spaceToken == null) ? 0 : spaceToken.hashCode()); - result = prime * result + ((status == null) ? 0 : status.hashCode()); - result = prime * result + ((userToken == null) ? 0 : userToken.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - RequestSummaryData other = (RequestSummaryData) obj; - if (deferredStartTime == null) { - if (other.deferredStartTime != null) { - return false; - } - } else if (!deferredStartTime.equals(other.deferredStartTime)) { - return false; - } - if (errstring == null) { - if (other.errstring != null) { - return false; - } - } else if (!errstring.equals(other.errstring)) { - return false; - } - if (fileLifetime == null) { - if (other.fileLifetime != null) { - return false; - } - } else if (!fileLifetime.equals(other.fileLifetime)) { - return false; - } - if (gu == null) { - if (other.gu != null) { - return false; - } - } else if (!gu.equals(other.gu)) { - return false; - } - if (id != other.id) { - return false; - } - if (nbreqfiles == null) { - if (other.nbreqfiles != null) { - return false; - } - } else if (!nbreqfiles.equals(other.nbreqfiles)) { - return false; - } - if (numOfCompleted == null) { - if (other.numOfCompleted != null) { - return false; - } - } else if (!numOfCompleted.equals(other.numOfCompleted)) { - return false; - } - if (numOfFailed == null) { - if (other.numOfFailed != null) { - return false; - } - } else if (!numOfFailed.equals(other.numOfFailed)) { - return false; - } - if (numOfWaiting == null) { - if (other.numOfWaiting != null) { - return 
false; - } - } else if (!numOfWaiting.equals(other.numOfWaiting)) { - return false; - } - if (pinLifetime == null) { - if (other.pinLifetime != null) { - return false; - } - } else if (!pinLifetime.equals(other.pinLifetime)) { - return false; - } - if (remainingDeferredStartTime == null) { - if (other.remainingDeferredStartTime != null) { - return false; - } - } else if (!remainingDeferredStartTime - .equals(other.remainingDeferredStartTime)) { - return false; - } - if (remainingTotalTime == null) { - if (other.remainingTotalTime != null) { - return false; - } - } else if (!remainingTotalTime.equals(other.remainingTotalTime)) { - return false; - } - if (requestToken == null) { - if (other.requestToken != null) { - return false; - } - } else if (!requestToken.equals(other.requestToken)) { - return false; - } - if (requestType != other.requestType) { - return false; - } - if (retrytime == null) { - if (other.retrytime != null) { - return false; - } - } else if (!retrytime.equals(other.retrytime)) { - return false; - } - if (spaceToken == null) { - if (other.spaceToken != null) { - return false; - } - } else if (!spaceToken.equals(other.spaceToken)) { - return false; - } - if (status == null) { - if (other.status != null) { - return false; - } - } else if (!status.equals(other.status)) { - return false; - } - if (userToken == null) { - if (other.userToken != null) { - return false; - } - } else if (!userToken.equals(other.userToken)) { - return false; - } - return true; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/RequestSummaryDataTO.java b/src/main/java/it/grid/storm/catalogs/RequestSummaryDataTO.java deleted file mode 100644 index 76dabb7bb..000000000 --- a/src/main/java/it/grid/storm/catalogs/RequestSummaryDataTO.java +++ /dev/null @@ -1,540 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import java.sql.Timestamp; - -/** - * Class that represents data of an asynchrnous Request, regardless of whether - * it is a Put, Get or Copy, in the Persistence Layer: this is all raw data - * referring to the request proper, that is, String and primitive types. - * - * @author EGRID ICTP - * @version 2.0 - * @date June 2005 - */ -public class RequestSummaryDataTO { - - public static final String PTG_REQUEST_TYPE = "PTG"; - public static final String PTP_REQUEST_TYPE = "PTP"; - public static final String BOL_REQUEST_TYPE = "BOL"; - public static final String COPY_REQUEST_TYPE = "COP"; - - private long id = -1; // id of request in persistence - private String requestType = ""; // request type - private String requestToken = ""; // request token - private String clientDN = ""; // DN that issued request - private String vomsAttributes = ""; // String containing all VOMS attributes - private Timestamp timestamp = null; - - private boolean empty = true; - private String userToken = null; - private Integer retrytime = null; - private Integer pinLifetime = null; - private String spaceToken = null; - private Integer status = null; - private String errstring = null; - private Integer remainingTotalTime = null; - private Integer nbreqfiles = null; - private Integer numOfCompleted = null; - private Integer fileLifetime = null; - private Integer deferredStartTime = null; - 
private Integer numOfWaiting = null; - private Integer numOfFailed = null; - private Integer remainingDeferredStartTime = null; - - public boolean isEmpty() { - - return empty; - } - - public long primaryKey() { - - return id; - } - - public void setPrimaryKey(long l) { - - empty = false; - id = l; - } - - public String requestType() { - - return requestType; - } - - public void setRequestType(String s) { - - empty = false; - requestType = s; - } - - public String requestToken() { - - return requestToken; - } - - public void setRequestToken(String s) { - - empty = false; - requestToken = s; - } - - public String clientDN() { - - return clientDN; - } - - public void setClientDN(String s) { - - empty = false; - clientDN = s; - } - - public String vomsAttributes() { - - return vomsAttributes; - } - - public void setVomsAttributes(String s) { - - empty = false; - vomsAttributes = s; - } - - public Timestamp timestamp() { - - return timestamp; - } - - public void setTimestamp(Timestamp timestamp) { - - empty = false; - this.timestamp = timestamp; - } - - /** - * @return the userToken - */ - public String getUserToken() { - - return userToken; - } - - /** - * @return the retrytime - */ - public Integer getRetrytime() { - - return retrytime; - } - - /** - * @return the pinLifetime - */ - public Integer getPinLifetime() { - - return pinLifetime; - } - - /** - * @return the spaceToken - */ - public String getSpaceToken() { - - return spaceToken; - } - - /** - * @return the status - */ - public Integer getStatus() { - - return status; - } - - /** - * @return the errstring - */ - public String getErrstring() { - - return errstring; - } - - /** - * @return the remainingTotalTime - */ - public Integer getRemainingTotalTime() { - - return remainingTotalTime; - } - - /** - * @return the nbreqfiles - */ - public Integer getNbreqfiles() { - - return nbreqfiles; - } - - /** - * @return the numOfCompleted - */ - public Integer getNumOfCompleted() { - - return numOfCompleted; - } - - 
/** - * @return the fileLifetime - */ - public Integer getFileLifetime() { - - return fileLifetime; - } - - /** - * @return the deferredStartTime - */ - public Integer getDeferredStartTime() { - - return deferredStartTime; - } - - /** - * @return the numOfWaiting - */ - public Integer getNumOfWaiting() { - - return numOfWaiting; - } - - /** - * @return the numOfFailed - */ - public Integer getNumOfFailed() { - - return numOfFailed; - } - - /** - * @return the remainingDeferredStartTime - */ - public Integer getRemainingDeferredStartTime() { - - return remainingDeferredStartTime; - } - - public void setUserToken(String userToken) { - - this.userToken = userToken; - } - - public void setRetrytime(Integer retrytime) { - - this.retrytime = retrytime; - - } - - public void setPinLifetime(Integer pinLifetime) { - - this.pinLifetime = pinLifetime; - - } - - public void setSpaceToken(String spaceToken) { - - this.spaceToken = spaceToken; - - } - - public void setStatus(Integer status) { - - this.status = status; - - } - - public void setErrstring(String errstring) { - - this.errstring = errstring; - - } - - public void setRemainingTotalTime(Integer remainingTotalTime) { - - this.remainingTotalTime = remainingTotalTime; - - } - - public void setNbreqfiles(Integer nbreqfiles) { - - this.nbreqfiles = nbreqfiles; - - } - - public void setNumOfCompleted(Integer numOfCompleted) { - - this.numOfCompleted = numOfCompleted; - - } - - public void setFileLifetime(Integer fileLifetime) { - - this.fileLifetime = fileLifetime; - - } - - public void setDeferredStartTime(Integer deferredStartTime) { - - this.deferredStartTime = deferredStartTime; - - } - - public void setNumOfWaiting(Integer numOfWaiting) { - - this.numOfWaiting = numOfWaiting; - - } - - public void setNumOfFailed(Integer numOfFailed) { - - this.numOfFailed = numOfFailed; - - } - - public void setRemainingDeferredStartTime(Integer remainingDeferredStartTime) { - - this.remainingDeferredStartTime = 
remainingDeferredStartTime; - - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("RequestSummaryDataTO [id="); - builder.append(id); - builder.append(", requestType="); - builder.append(requestType); - builder.append(", requestToken="); - builder.append(requestToken); - builder.append(", clientDN="); - builder.append(clientDN); - builder.append(", vomsAttributes="); - builder.append(vomsAttributes); - builder.append(", timestamp="); - builder.append(timestamp); - builder.append(", empty="); - builder.append(empty); - builder.append(", userToken="); - builder.append(userToken); - builder.append(", retrytime="); - builder.append(retrytime); - builder.append(", pinLifetime="); - builder.append(pinLifetime); - builder.append(", spaceToken="); - builder.append(spaceToken); - builder.append(", status="); - builder.append(status); - builder.append(", errstring="); - builder.append(errstring); - builder.append(", remainingTotalTime="); - builder.append(remainingTotalTime); - builder.append(", nbreqfiles="); - builder.append(nbreqfiles); - builder.append(", numOfCompleted="); - builder.append(numOfCompleted); - builder.append(", fileLifetime="); - builder.append(fileLifetime); - builder.append(", deferredStartTime="); - builder.append(deferredStartTime); - builder.append(", numOfWaiting="); - builder.append(numOfWaiting); - builder.append(", numOfFailed="); - builder.append(numOfFailed); - builder.append(", remainingDeferredStartTime="); - builder.append(remainingDeferredStartTime); - builder.append("]"); - return builder.toString(); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = 1; - result = prime * result + ((clientDN == null) ? 
0 : clientDN.hashCode()); - result = prime * result - + (int) (deferredStartTime ^ (deferredStartTime >>> 32)); - result = prime * result + (empty ? 1231 : 1237); - result = prime * result + ((errstring == null) ? 0 : errstring.hashCode()); - result = prime * result + (int) (fileLifetime ^ (fileLifetime >>> 32)); - result = prime * result + (int) (id ^ (id >>> 32)); - result = prime * result + (int) (nbreqfiles ^ (nbreqfiles >>> 32)); - result = prime * result + (int) (numOfCompleted ^ (numOfCompleted >>> 32)); - result = prime * result + (int) (numOfFailed ^ (numOfFailed >>> 32)); - result = prime * result + (int) (numOfWaiting ^ (numOfWaiting >>> 32)); - result = prime * result + (int) (pinLifetime ^ (pinLifetime >>> 32)); - result = prime - * result - + (int) (remainingDeferredStartTime ^ (remainingDeferredStartTime >>> 32)); - result = prime * result - + (int) (remainingTotalTime ^ (remainingTotalTime >>> 32)); - result = prime * result - + ((requestToken == null) ? 0 : requestToken.hashCode()); - result = prime * result - + ((requestType == null) ? 0 : requestType.hashCode()); - result = prime * result + (int) (retrytime ^ (retrytime >>> 32)); - result = prime * result - + ((spaceToken == null) ? 0 : spaceToken.hashCode()); - result = prime * result + (int) (status ^ (status >>> 32)); - result = prime * result + ((timestamp == null) ? 0 : timestamp.hashCode()); - result = prime * result + ((userToken == null) ? 0 : userToken.hashCode()); - result = prime * result - + ((vomsAttributes == null) ? 
0 : vomsAttributes.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - RequestSummaryDataTO other = (RequestSummaryDataTO) obj; - if (clientDN == null) { - if (other.clientDN != null) { - return false; - } - } else if (!clientDN.equals(other.clientDN)) { - return false; - } - if (deferredStartTime != other.deferredStartTime) { - return false; - } - if (empty != other.empty) { - return false; - } - if (errstring == null) { - if (other.errstring != null) { - return false; - } - } else if (!errstring.equals(other.errstring)) { - return false; - } - if (fileLifetime != other.fileLifetime) { - return false; - } - if (id != other.id) { - return false; - } - if (nbreqfiles != other.nbreqfiles) { - return false; - } - if (numOfCompleted != other.numOfCompleted) { - return false; - } - if (numOfFailed != other.numOfFailed) { - return false; - } - if (numOfWaiting != other.numOfWaiting) { - return false; - } - if (pinLifetime != other.pinLifetime) { - return false; - } - if (remainingDeferredStartTime != other.remainingDeferredStartTime) { - return false; - } - if (remainingTotalTime != other.remainingTotalTime) { - return false; - } - if (requestToken == null) { - if (other.requestToken != null) { - return false; - } - } else if (!requestToken.equals(other.requestToken)) { - return false; - } - if (requestType == null) { - if (other.requestType != null) { - return false; - } - } else if (!requestType.equals(other.requestType)) { - return false; - } - if (retrytime != other.retrytime) { - return false; - } - if (spaceToken == null) { - if (other.spaceToken != null) { - return false; - } - } else if (!spaceToken.equals(other.spaceToken)) { - return false; - } - if (status != other.status) { - return false; - } - if 
(timestamp == null) { - if (other.timestamp != null) { - return false; - } - } else if (!timestamp.equals(other.timestamp)) { - return false; - } - if (userToken == null) { - if (other.userToken != null) { - return false; - } - } else if (!userToken.equals(other.userToken)) { - return false; - } - if (vomsAttributes == null) { - if (other.vomsAttributes != null) { - return false; - } - } else if (!vomsAttributes.equals(other.vomsAttributes)) { - return false; - } - return true; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/ReservedSpaceCatalog.java b/src/main/java/it/grid/storm/catalogs/ReservedSpaceCatalog.java index 675dc182f..eebecee4b 100644 --- a/src/main/java/it/grid/storm/catalogs/ReservedSpaceCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/ReservedSpaceCatalog.java @@ -21,18 +21,6 @@ package it.grid.storm.catalogs; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.persistence.DAOFactory; -import it.grid.storm.persistence.PersistenceDirector; -import it.grid.storm.persistence.dao.StorageSpaceDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.StorageSpaceTO; -import it.grid.storm.persistence.model.TransferObjectDecodingException; -import it.grid.storm.space.StorageSpaceData; -import it.grid.storm.srm.types.ArrayOfTSpaceToken; -import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException; -import it.grid.storm.srm.types.TSpaceToken; - import java.io.File; import java.util.Calendar; import java.util.Collection; @@ -40,7 +28,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import org.slf4j.Logger; @@ -48,664 +35,443 @@ import com.google.common.collect.Lists; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.dao.StorageSpaceDAO; +import it.grid.storm.persistence.exceptions.DataAccessException; +import 
it.grid.storm.persistence.impl.mysql.StorageSpaceDAOMySql; +import it.grid.storm.persistence.model.StorageSpaceTO; +import it.grid.storm.persistence.model.TransferObjectDecodingException; +import it.grid.storm.space.StorageSpaceData; +import it.grid.storm.srm.types.ArrayOfTSpaceToken; +import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException; +import it.grid.storm.srm.types.TSpaceToken; + /** * */ public class ReservedSpaceCatalog { - private static final Logger log = LoggerFactory - .getLogger(ReservedSpaceCatalog.class); - private static HashSet voSA_spaceTokenSet = new HashSet(); - private static HashMap voSA_UpdateTime = new HashMap(); - - private static final long NOT_INITIALIZED_SIZE_VALUE = -1L; - - private final DAOFactory daoFactory; - private StorageSpaceDAO ssDAO; - - /********************************************* - * STATIC METHODS - *********************************************/ - public static void addSpaceToken(TSpaceToken token) { - - voSA_spaceTokenSet.add(token); - voSA_UpdateTime.put(token, null); - } - - public static HashSet getTokenSet() { - - return voSA_spaceTokenSet; - } - - public static void clearTokenSet() { - - voSA_spaceTokenSet.clear(); - voSA_UpdateTime.clear(); - } - - public static void setUpdateTime(TSpaceToken token, Date updateTime) { - - if (voSA_UpdateTime.containsKey(token)) { - voSA_UpdateTime.put(token, updateTime); - } else { - log.warn("Failing while Trying to set update time in Catalog cache."); - } - } - - public static Date getUpdateTime(TSpaceToken token) { - - Date result = null; - if (voSA_UpdateTime.containsKey(token)) { - result = voSA_UpdateTime.get(token); - } else { - log.warn("Failing while Trying to set update time in Catalog cache."); - } - return result; - } - - /********************************************* - * CLASS METHODS - *********************************************/ - /** - * Default constructor - */ - public ReservedSpaceCatalog() { - - log.debug("Building Reserve Space Catalog..."); - 
// Binding to the persistence component - daoFactory = PersistenceDirector.getDAOFactory(); - } - - /** - * Basic method used to retrieve all the information about a StorageSpace - - * StorageSpace is selected by SpaceToken - * - * @param spaceToken - * TSpaceToken - * @return StorageSpaceData, null if no-one SS exists with the specified - * spaceToken - * @throws DataAccessException - */ - public StorageSpaceData getStorageSpace(TSpaceToken spaceToken) - throws TransferObjectDecodingException, DataAccessException { - - StorageSpaceData result = null; - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - StorageSpaceTO ssTO = ssDAO.getStorageSpaceByToken(spaceToken.getValue()); - log.debug("Storage Space retrieved by Token. "); - if (ssTO != null) { - try { - result = new StorageSpaceData(ssTO); - } catch (IllegalArgumentException e) { - log.error("Error building StorageSpaceData from StorageSpaceTO " - + "IllegalArgumentException: {}", e.getLocalizedMessage(), e); - throw new TransferObjectDecodingException( - "Unable to build StorageSpaceData from StorageSpaceTO"); - } - } else { - log.info("Unable to build StorageSpaceData. No StorageSpaceTO built " - + "from the DB"); - } - return result; - } - - /** - * Create a new StorageSpace entry into the DB. 
It is used for - STATIC Space - * Creation - DYNAMIC Space Reservation - * - * @param ssd - * @throws NoDataFoundException - * @throws InvalidRetrievedDataException - * @throws MultipleDataEntriesException - */ - public void addStorageSpace(StorageSpaceData ssd) throws DataAccessException { - - log.debug("ADD StorageSpace Start..."); - StorageSpaceTO ssTO = new StorageSpaceTO(ssd); - log.debug("Storage Space TO Created"); - ssTO.setUpdateTime(new Date()); - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - ssDAO.addStorageSpace(ssTO); - log.debug("StorageSpaceTO inserted in Persistence"); - } - - /** - * Update all the fields apart from the alias of a storage space row given the - * input StorageSpaceData - * - * @param ssd - * - * @throws DataAccessException - */ - public void updateStorageSpace(StorageSpaceData ssd) throws DataAccessException { - - updateStorageSpace(ssd, null); - } - - /** - * @param ssd - * @param updateTime - * - * @throws DataAccessException - */ - public void updateStorageSpace(StorageSpaceData ssd, Date updateTime) throws DataAccessException { - - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - - StorageSpaceTO ssTO = new StorageSpaceTO(ssd); - updateTime = updateTime == null ? 
new Date() : updateTime; - ssTO.setUpdateTime(updateTime); - - ssDAO.updateStorageSpace(ssTO); - log.debug("StorageSpaceTO updated in Persistence"); - } - - /** - * @param ssd - */ - public void updateStorageSpaceFreeSpace(StorageSpaceData ssd) - throws DataAccessException { - - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - StorageSpaceTO ssTO = new StorageSpaceTO(ssd); - log.debug("Storage Space TO Created"); - ssTO.setUpdateTime(new Date()); - ssDAO.updateStorageSpaceFreeSpace(ssTO); - log.debug("StorageSpaceTO updated in Persistence"); - - } - - /** - * @param ssd - * @throws NoDataFoundException - * @throws InvalidRetrievedDataException - * @throws MultipleDataEntriesException - */ - public void updateAllStorageSpace(StorageSpaceData ssd) - throws NoDataFoundException, InvalidRetrievedDataException, - MultipleDataEntriesException { - - updateAllStorageSpace(ssd, null); - } - - /** - * Update StorageSpace. This method is used to update the StorageSpace into - * the ReserveSpace Catalog. The update operation take place after a - * AbortRequest for a PrepareToPut operation done with the spaceToken.(With or - * without the size specified). 
- */ - - public void updateAllStorageSpace(StorageSpaceData ssd, Date updateTime) - throws NoDataFoundException, InvalidRetrievedDataException, - MultipleDataEntriesException { - - log.debug("UPDATE StorageSpace Start..."); - // Build StorageSpaceTO from SpaceData - StorageSpaceTO ssTO = new StorageSpaceTO(ssd); - log.debug("Storage Space TO Created"); - if (updateTime == null) { - // The update time of the information is now - ssTO.setUpdateTime(new Date()); - } else { - ssTO.setUpdateTime(updateTime); - } - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Add the row to the persistence.. - try { - ssDAO.updateAllStorageSpace(ssTO); - log.debug("StorageSpaceTO updated in Persistence"); - } catch (DataAccessException daEx) { - log.error("Error while inserting new row in StorageSpace: {}", - daEx.getMessage(), daEx); - } - } - - /** - * @param desc - * @return - */ - public StorageSpaceData getStorageSpaceByAlias(String desc) { - - StorageSpaceData result = null; // new StorageSpaceData(); - log.debug("Retrieve Storage Space start... "); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Get StorageSpaceTO form persistence - try { - Collection cl = ssDAO.getStorageSpaceByAliasOnly(desc); - if (cl != null && !cl.isEmpty()) { - log.debug("Storage Space retrieved by Token. 
"); - // Build the result - try { - result = new StorageSpaceData(cl.toArray(new StorageSpaceTO[0])[0]); - } catch (IllegalArgumentException e) { - log.error("unable to build StorageSpaceData from StorageSpaceTO " - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpace: {}", daEx.getMessage(), - daEx); - } - - return result; - } - - /** - * Provides a list of storage spaces not initialized by comparing the used - * space stored against the well know not initialized value - * NOT_INITIALIZED_SIZE_VALUE - * - * @return SpaceData - */ - public List getStorageSpaceNotInitialized() { - - log.debug("Retrieve Storage Space not initialized start "); - List result = Lists.newLinkedList(); - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - // Get StorageSpaceTO form persistence - try { - Collection storagesSpaceTOCollection = ssDAO - .getStorageSpaceByUnavailableUsedSpace(NOT_INITIALIZED_SIZE_VALUE); - log.debug("Storage Space retrieved by not initialized used space. "); - for (StorageSpaceTO storagesSpaceTO : storagesSpaceTOCollection) { - if (storagesSpaceTO != null) { - try { - result.add(new StorageSpaceData(storagesSpaceTO)); - } catch (IllegalArgumentException e) { - log.error("unable to build StorageSpaceData from StorageSpaceTO. 
" - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } else { - log.warn("Received a collection of StorageSpaceTO containing null " - + "elements, skipping them"); - } - } - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpace", daEx); - } - return result; - } - - /** - * Provides a list of storage spaces not updated since the provided timestamp - * - * @param lastUpdateTimestamp - * @return - */ - - public List getStorageSpaceByLastUpdate( - Date lastUpdateTimestamp) { - - log.debug("Retrieve Storage Space not initialized start "); - LinkedList result = new LinkedList(); - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - // GetStorageSpaceTO form persistence - try { - Collection storagesSpaceTOCollection = ssDAO - .getStorageSpaceByPreviousLastUpdate(lastUpdateTimestamp); - log.debug("Storage Space retrieved by Token previous last update. 
"); - for (StorageSpaceTO storagesSpaceTO : storagesSpaceTOCollection) { - if (storagesSpaceTO != null) { - try { - result.add(new StorageSpaceData(storagesSpaceTO)); - } catch (IllegalArgumentException e) { - log.error("unable to build StorageSpaceData from StorageSpaceTO " - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } else { - log.warn("Received a collection of StorageSpaceTO containing null " - + "elements, skipping them"); - } - } - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpace: {}", daEx.getMessage(), - daEx); - } - return result; - } - - /** - * - * @param user - * VomsGridUser - * @param spaceAlias - * String - * @return ArrayOfTSpaceToken - */ - public ArrayOfTSpaceToken getSpaceTokens(GridUserInterface user, - String spaceAlias) { - - ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); - - log.debug("Retrieving space tokens..."); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Get StorageSpaceTO form persistence - try { - - Collection listOfStorageSpace = ssDAO.getStorageSpaceByOwner(user, - spaceAlias); - - int nItems = listOfStorageSpace.size(); - log.debug("getSpaceTokens : Number of Storage spaces retrieved with " - + "Alias '{}': {}", spaceAlias, nItems); - Iterator j_ssTO = listOfStorageSpace.iterator(); - - while (j_ssTO.hasNext()) { - StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); - try { - TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); - result.addTSpaceToken(spaceToken); - } catch (InvalidTSpaceTokenAttributesException ex2) { - log.error("Retrieved invalid Space token from DB"); - } - } - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), - daEx); - } catch (Exception 
e) { - log.error("Exception while retrieving Storage Space: {}", e.getMessage(), - e); - } - return result; - } - - /** - * This method is used for the VOspaceArea Check. - * - * @param spaceAlias - * @return - */ - - public ArrayOfTSpaceToken getSpaceTokensByAlias(String spaceAlias) { - - ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); - - log.debug("Retrieving space tokens..."); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Get StorageSpaceTO form persistence - try { - Collection listOfStorageSpace = ssDAO - .getStorageSpaceByAliasOnly(spaceAlias); - - int nItems = listOfStorageSpace.size(); - log.debug("Number of Storage spaces retrieved: {}", nItems); - Iterator j_ssTO = listOfStorageSpace.iterator(); - - while (j_ssTO.hasNext()) { - StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); - try { - TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); - result.addTSpaceToken(spaceToken); - } catch (InvalidTSpaceTokenAttributesException ex2) { - log.error("Retrieved invalid Space token from DB"); - } - } - - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), - daEx); - } catch (Exception e) { - log.error("Error getting data! Error: {}", e.getMessage(), e); - } - return result; - } - - /** - * This method is used for the VOspaceArea Check. 
- * - * @param VOname - * @return - */ - - public ArrayOfTSpaceToken getSpaceTokensBySpaceType(String stype) { - - ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); - - log.debug("Retrieving space tokens..."); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Get StorageSpaceTO form persistence - try { - Collection listOfStorageSpace = ssDAO.getStorageSpaceBySpaceType(stype); - - int nItems = listOfStorageSpace.size(); - log.debug("Number of Storage spaces retrieved: {}", nItems); - Iterator j_ssTO = listOfStorageSpace.iterator(); - - while (j_ssTO.hasNext()) { - StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); - try { - TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); - result.addTSpaceToken(spaceToken); - } catch (InvalidTSpaceTokenAttributesException ex2) { - log.error("Retrieved invalid Space token from DB"); - } - } - - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), daEx); - } catch (Exception e) { - log.error("Generic Error while retrieving StorageSpace: {}", e.getMessage(), e); - } - return result; - } - - // ************************ CHECH BELOW METHODS *************************** - - /** - * - * @param user - * GridUserInterface - * @param spaceToken - * TSpaceToken - * @return boolean - */ - public boolean release(GridUserInterface user, final TSpaceToken spaceToken) { - - log.debug("Delete storage spaceToken info from persistence: {}", spaceToken); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", - daEx.getMessage(), daEx); - } - boolean 
rowRemoved = true; - // Delete the row from persistence. - try { - ssDAO.removeStorageSpace(user, spaceToken.getValue()); - log.debug("spaceToken removed from DB."); - } catch (DataAccessException daEx) { - log.error("spaceToken not found in the DB: {}", spaceToken.getValue()); - rowRemoved = false; - } - return rowRemoved; - } - - /** - * Method that purges the catalog, removing expired space reservation. The - * spacefile with lifetime expired are removed from the file systems. - * - */ - public void purge() { - - log.debug("Space Garbage Collector start!"); - Calendar rightNow = Calendar.getInstance(); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - // Get the Collection of Space Resrvation Expired - Collection expiredSpaceTO; - try { - expiredSpaceTO = ssDAO.getExpired(rightNow.getTimeInMillis() / 1000); - } catch (DataAccessException e) { - // No space expired FOUND - log.debug("Space Garbage Collector: no space expired found."); - return; - } - - // For each entry expired - // 1) Delete the related space file - // 2) Remove the entry from the DB - - StorageSpaceTO spaceTO = null; - log.debug("Space Garbage Collector: Number of SpaceFile to remove {}.", - expiredSpaceTO.size()); - - for (Iterator i = expiredSpaceTO.iterator(); i.hasNext();) { - spaceTO = (StorageSpaceTO) i.next(); - // Deleteing space File - String spaceFileName = spaceTO.getSpaceFile(); - File sfile = new File(spaceFileName); - log.debug("Space Garbage Collector: SpaceFile to remove {}.", spaceFileName); - - if (sfile.delete()) { - log.debug("Space Garbage Collector: SpaceFile {} removed.", spaceFileName); - } else { - log.warn("Space Garbage Collector: problem removing {}", spaceFileName); - } - - // Removing space entry from the DB - try { - 
ssDAO.removeStorageSpace(spaceTO.getSpaceToken()); - } catch (DataAccessException e) { - log.warn("Space Garbage Collector: error removing space entry from catalog."); - } - - } - - } - - public boolean increaseUsedSpace(String spaceToken, Long usedSpaceToAdd) { - - log.debug("Increase {} the used space of storage spaceToken: {}", - usedSpaceToAdd, spaceToken); - - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", - daEx.getMessage(), daEx); - return false; - } - int n = 0; - try { - n = ssDAO.increaseUsedSpace(spaceToken, usedSpaceToAdd); - } catch (DataAccessException daEx) { - log.error( - "Error during the increase of used space for spaceToken {}: {}", - spaceToken, daEx.getMessage()); - return false; - } - if (n == 0) { - log.warn( - "No errors caught but it seems no used space updates done on space token {}", - spaceToken); - } - log.debug("{} increaseUsedSpace += {}", spaceToken, usedSpaceToAdd); - return n > 0; - } - - public boolean decreaseUsedSpace(String spaceToken, Long usedSpaceToRemove) { - - log.debug("Decrease {} the used space of storage spaceToken: {}", - usedSpaceToRemove, spaceToken); - - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", - daEx.getMessage(), daEx); - return false; - } - int n = 0; - try { - n = ssDAO.decreaseUsedSpace(spaceToken, usedSpaceToRemove); - } catch (DataAccessException daEx) { - log.error( - "Error during the decrease of used space for spaceToken {}: {}", - spaceToken, daEx.getMessage()); - return false; - } - if (n == 0) { - log.warn( - "No errors caught but it seems no used space updates done on space token {}", - spaceToken); - } - log.debug("{} decreaseUsedSpace -= {}", spaceToken, usedSpaceToRemove); - return n > 0; - } + private 
static final Logger log = LoggerFactory.getLogger(ReservedSpaceCatalog.class); + + private static HashSet voSA_spaceTokenSet = new HashSet(); + private static HashMap voSA_UpdateTime = new HashMap(); + + private static final long NOT_INITIALIZED_SIZE_VALUE = -1L; + + private static ReservedSpaceCatalog instance; + + public static synchronized ReservedSpaceCatalog getInstance() { + if (instance == null) { + instance = new ReservedSpaceCatalog(); + } + return instance; + } + + private StorageSpaceDAO ssDAO; + + private ReservedSpaceCatalog() { + + log.debug("Building Reserve Space Catalog..."); + ssDAO = StorageSpaceDAOMySql.getInstance(); + } + + /********************************************* + * STATIC METHODS + *********************************************/ + public static void addSpaceToken(TSpaceToken token) { + + voSA_spaceTokenSet.add(token); + voSA_UpdateTime.put(token, null); + } + + public static HashSet getTokenSet() { + + return voSA_spaceTokenSet; + } + + public static void clearTokenSet() { + + voSA_spaceTokenSet.clear(); + voSA_UpdateTime.clear(); + } + + /** + * Basic method used to retrieve all the information about a StorageSpace - StorageSpace is + * selected by SpaceToken + * + * @param spaceToken TSpaceToken + * @return StorageSpaceData, null if no-one SS exists with the specified spaceToken + * @throws DataAccessException + */ + public StorageSpaceData getStorageSpace(TSpaceToken spaceToken) + throws TransferObjectDecodingException, DataAccessException { + + StorageSpaceData result = null; + ssDAO = StorageSpaceDAOMySql.getInstance(); + log.debug("Storage Space DAO retrieved."); + StorageSpaceTO ssTO = ssDAO.getStorageSpaceByToken(spaceToken.getValue()); + log.debug("Storage Space retrieved by Token. 
"); + if (ssTO != null) { + try { + result = new StorageSpaceData(ssTO); + } catch (IllegalArgumentException e) { + log.error( + "Error building StorageSpaceData from StorageSpaceTO " + "IllegalArgumentException: {}", + e.getLocalizedMessage(), e); + throw new TransferObjectDecodingException( + "Unable to build StorageSpaceData from StorageSpaceTO"); + } + } else { + log.info("Unable to build StorageSpaceData. No StorageSpaceTO built " + "from the DB"); + } + return result; + } + + /** + * Create a new StorageSpace entry into the DB. It is used for - STATIC Space Creation - DYNAMIC + * Space Reservation + * + * @param ssd + * @throws NoDataFoundException + * @throws InvalidRetrievedDataException + * @throws MultipleDataEntriesException + */ + public void addStorageSpace(StorageSpaceData ssd) throws DataAccessException { + + log.debug("ADD StorageSpace Start..."); + StorageSpaceTO ssTO = new StorageSpaceTO(ssd); + log.debug("Storage Space TO Created"); + ssTO.setUpdateTime(new Date()); + log.debug("Storage Space DAO retrieved."); + ssDAO.addStorageSpace(ssTO); + log.debug("StorageSpaceTO inserted in Persistence"); + } + + /** + * Update all the fields apart from the alias of a storage space row given the input + * StorageSpaceData + * + * @param ssd + * + * @throws DataAccessException + */ + public void updateStorageSpace(StorageSpaceData ssd) throws DataAccessException { + + log.debug("Storage Space DAO retrieved."); + + StorageSpaceTO ssTO = new StorageSpaceTO(ssd); + ssTO.setCreated(null); // we don't want to update the creation timestamp + ssTO.setUpdateTime(new Date()); + + ssDAO.updateStorageSpace(ssTO); + log.debug("StorageSpaceTO updated in Persistence"); + } + + /** + * @param ssd + */ + public void updateStorageSpaceFreeSpace(StorageSpaceData ssd) throws DataAccessException { + + log.debug("Storage Space DAO retrieved."); + StorageSpaceTO ssTO = new StorageSpaceTO(ssd); + log.debug("Storage Space TO Created"); + ssTO.setUpdateTime(new Date()); + 
ssDAO.updateStorageSpaceFreeSpace(ssTO); + log.debug("StorageSpaceTO updated in Persistence"); + + } + + /** + * @param ssd + * @throws NoDataFoundException + * @throws InvalidRetrievedDataException + * @throws MultipleDataEntriesException + */ + public void updateAllStorageSpace(StorageSpaceData ssd) { + + updateAllStorageSpace(ssd, null); + } + + /** + * Update StorageSpace. This method is used to update the StorageSpace into the ReserveSpace + * Catalog. The update operation take place after a AbortRequest for a PrepareToPut operation done + * with the spaceToken.(With or without the size specified). + */ + + public void updateAllStorageSpace(StorageSpaceData ssd, Date updateTime) { + + log.debug("UPDATE StorageSpace Start..."); + // Build StorageSpaceTO from SpaceData + StorageSpaceTO ssTO = new StorageSpaceTO(ssd); + log.debug("Storage Space TO Created"); + if (updateTime == null) { + // The update time of the information is now + ssTO.setUpdateTime(new Date()); + } else { + ssTO.setUpdateTime(updateTime); + } + + // Add the row to the persistence.. + try { + ssDAO.updateAllStorageSpace(ssTO); + log.debug("StorageSpaceTO updated in Persistence"); + } catch (DataAccessException daEx) { + log.error("Error while inserting new row in StorageSpace: {}", daEx.getMessage(), daEx); + } + } + + /** + * @param desc + * @return + */ + public StorageSpaceData getStorageSpaceByAlias(String desc) { + + StorageSpaceData result = null; // new StorageSpaceData(); + log.debug("Retrieve Storage Space start... "); + + // Get StorageSpaceTO form persistence + try { + Collection cl = ssDAO.getStorageSpaceByAliasOnly(desc); + if (cl != null && !cl.isEmpty()) { + log.debug("Storage Space retrieved by Token. 
"); + // Build the result + try { + result = new StorageSpaceData(cl.toArray(new StorageSpaceTO[0])[0]); + } catch (IllegalArgumentException e) { + log.error("unable to build StorageSpaceData from StorageSpaceTO " + + "IllegalArgumentException: {}", e.getMessage(), e); + } + } + } catch (DataAccessException daEx) { + log.debug("Error while retrieving StorageSpace: {}", daEx.getMessage(), daEx); + } + + return result; + } + + /** + * Provides a list of storage spaces not initialized by comparing the used space stored against + * the well know not initialized value NOT_INITIALIZED_SIZE_VALUE + * + * @return SpaceData + */ + public List getStorageSpaceNotInitialized() { + + log.debug("Retrieve Storage Space not initialized start "); + List result = Lists.newLinkedList(); + + // Get StorageSpaceTO form persistence + try { + Collection storagesSpaceTOCollection = + ssDAO.getStorageSpaceByUnavailableUsedSpace(NOT_INITIALIZED_SIZE_VALUE); + log.debug("Storage Space retrieved by not initialized used space. "); + for (StorageSpaceTO storagesSpaceTO : storagesSpaceTOCollection) { + if (storagesSpaceTO != null) { + try { + result.add(new StorageSpaceData(storagesSpaceTO)); + } catch (IllegalArgumentException e) { + log.error("unable to build StorageSpaceData from StorageSpaceTO. 
" + + "IllegalArgumentException: {}", e.getMessage(), e); + } + } else { + log.warn("Received a collection of StorageSpaceTO containing null " + + "elements, skipping them"); + } + } + } catch (DataAccessException daEx) { + log.debug("Error while retrieving StorageSpace", daEx); + } + return result; + } + + /** + * + * @param user VomsGridUser + * @param spaceAlias String + * @return ArrayOfTSpaceToken + */ + public ArrayOfTSpaceToken getSpaceTokens(GridUserInterface user, String spaceAlias) { + + ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); + + log.debug("Retrieving space tokens..."); + + try { + + Collection listOfStorageSpace = ssDAO.getStorageSpaceByOwner(user, spaceAlias); + int nItems = listOfStorageSpace.size(); + log.debug("getSpaceTokens : Number of Storage spaces retrieved with " + "Alias '{}': {}", + spaceAlias, nItems); + Iterator j_ssTO = listOfStorageSpace.iterator(); + + while (j_ssTO.hasNext()) { + StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); + try { + TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); + result.addTSpaceToken(spaceToken); + } catch (InvalidTSpaceTokenAttributesException ex2) { + log.error("Retrieved invalid Space token from DB"); + } + } + + } catch (DataAccessException daEx) { + log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), daEx); + } catch (Exception e) { + log.error("Exception while retrieving Storage Space: {}", e.getMessage(), e); + } + return result; + } + + /** + * This method is used for the VOspaceArea Check. 
+ * + * @param spaceAlias + * @return + */ + + public ArrayOfTSpaceToken getSpaceTokensByAlias(String spaceAlias) { + + ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); + + log.debug("Retrieving space tokens..."); + + try { + + Collection listOfStorageSpace = ssDAO.getStorageSpaceByAliasOnly(spaceAlias); + int nItems = listOfStorageSpace.size(); + log.debug("Number of Storage spaces retrieved: {}", nItems); + Iterator j_ssTO = listOfStorageSpace.iterator(); + + while (j_ssTO.hasNext()) { + StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); + try { + TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); + result.addTSpaceToken(spaceToken); + } catch (InvalidTSpaceTokenAttributesException ex2) { + log.error("Retrieved invalid Space token from DB"); + } + } + + } catch (DataAccessException daEx) { + log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), daEx); + } catch (Exception e) { + log.error("Error getting data! Error: {}", e.getMessage(), e); + } + return result; + } + + // ************************ CHECH BELOW METHODS *************************** + + /** + * + * @param user GridUserInterface + * @param spaceToken TSpaceToken + * @return boolean + */ + public boolean release(GridUserInterface user, final TSpaceToken spaceToken) { + + log.debug("Delete storage spaceToken info from persistence: {}", spaceToken); + + boolean rowRemoved = true; + // Delete the row from persistence. + try { + ssDAO.removeStorageSpace(user, spaceToken.getValue()); + log.debug("spaceToken removed from DB."); + } catch (DataAccessException daEx) { + log.error("spaceToken not found in the DB: {}", spaceToken.getValue()); + rowRemoved = false; + } + return rowRemoved; + } + + /** + * Method that purges the catalog, removing expired space reservation. The spacefile with lifetime + * expired are removed from the file systems. 
+ * + */ + public void purge() { + + log.debug("Space Garbage Collector start!"); + Calendar rightNow = Calendar.getInstance(); + + // Get the Collection of Space Reservation Expired + Collection expiredSpaceTO; + try { + expiredSpaceTO = ssDAO.getExpired(rightNow.getTimeInMillis() / 1000); + } catch (DataAccessException e) { + // No space expired FOUND + log.debug("Space Garbage Collector: no space expired found."); + return; + } + + // For each entry expired + // 1) Delete the related space file + // 2) Remove the entry from the DB + + StorageSpaceTO spaceTO = null; + log.debug("Space Garbage Collector: Number of SpaceFile to remove {}.", expiredSpaceTO.size()); + + for (Iterator i = expiredSpaceTO.iterator(); i.hasNext();) { + spaceTO = (StorageSpaceTO) i.next(); + // Deleting space File + String spaceFileName = spaceTO.getSpaceFile(); + File sfile = new File(spaceFileName); + log.debug("Space Garbage Collector: SpaceFile to remove {}.", spaceFileName); + + if (sfile.delete()) { + log.debug("Space Garbage Collector: SpaceFile {} removed.", spaceFileName); + } else { + log.warn("Space Garbage Collector: problem removing {}", spaceFileName); + } + + // Removing space entry from the DB + try { + ssDAO.removeStorageSpace(spaceTO.getSpaceToken()); + } catch (DataAccessException e) { + log.warn("Space Garbage Collector: error removing space entry from catalog."); + } + } + } + + public boolean increaseUsedSpace(String spaceToken, Long usedSpaceToAdd) { + + log.debug("Increase {} the used space of storage spaceToken: {}", usedSpaceToAdd, spaceToken); + + int n = 0; + try { + n = ssDAO.increaseUsedSpace(spaceToken, usedSpaceToAdd); + } catch (DataAccessException daEx) { + log.error("Error during the increase of used space for spaceToken {}: {}", spaceToken, + daEx.getMessage()); + return false; + } + if (n == 0) { + log.warn("No errors caught but it seems no used space updates done on space token {}", + spaceToken); + } + log.debug("{} increaseUsedSpace += {}", 
spaceToken, usedSpaceToAdd); + return n > 0; + } + + public boolean decreaseUsedSpace(String spaceToken, Long usedSpaceToRemove) { + + log.debug("Decrease {} the used space of storage spaceToken: {}", usedSpaceToRemove, + spaceToken); + + int n = 0; + try { + n = ssDAO.decreaseUsedSpace(spaceToken, usedSpaceToRemove); + } catch (DataAccessException daEx) { + log.error("Error during the decrease of used space for spaceToken {}: {}", spaceToken, + daEx.getMessage()); + return false; + } + if (n == 0) { + log.warn("No errors caught but it seems no used space updates done on space token {}", + spaceToken); + } + log.debug("{} decreaseUsedSpace -= {}", spaceToken, usedSpaceToRemove); + return n > 0; + } } diff --git a/src/main/java/it/grid/storm/catalogs/SizeInBytesIntConverter.java b/src/main/java/it/grid/storm/catalogs/SizeInBytesIntConverter.java deleted file mode 100644 index bc48611b3..000000000 --- a/src/main/java/it/grid/storm/catalogs/SizeInBytesIntConverter.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TSizeInBytes; - -/** - * Class that handles DB representation of a TSizeInBytes, in particular it - * takes care of the NULL logic of the DB: 0/null are used to mean an empty - * field, whereas StoRM Object model uses the type TSizeInBytes.makeEmpty(); - * moreover StoRM does accept 0 as a valid TSizeInBytes, so it _is_ important to - * use this converter! - * - * @author EGRID ICTP - * @version 2.0 - * @date July 2005 - */ -public class SizeInBytesIntConverter { - - private static SizeInBytesIntConverter stc = new SizeInBytesIntConverter(); - - private SizeInBytesIntConverter() { - - } - - /** - * Method that returns the only instance of SizeInBytesIntConverter - */ - public static SizeInBytesIntConverter getInstance() { - - return stc; - } - - /** - * Method that transaltes the Empty TSizeInBytes into the empty representation - * of DB which is 0. Any other int is left as is. - */ - public long toDB(long s) { - - if (s == TSizeInBytes.makeEmpty().value()) - return 0; - return s; - } - - /** - * Method that returns the int as is, except if it is 0 which DB interprests - * as empty field: in that case it then returns the Empty TSizeInBytes int - * representation. - */ - public long toStoRM(long s) { - - if (s == 0) - return TSizeInBytes.makeEmpty().value(); - return s; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/SpaceTokenStringConverter.java b/src/main/java/it/grid/storm/catalogs/SpaceTokenStringConverter.java deleted file mode 100644 index 75c79230f..000000000 --- a/src/main/java/it/grid/storm/catalogs/SpaceTokenStringConverter.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TSpaceToken; - -/** - * Class that handles DPM DB representation of a SpaceToken, in particular it - * takes care of the NULL/EMPTY logic of DPM. In particular DPM uses the empty - * string "" as meaning the absence of a value for the field, wheras StoRM - * accepts it as a valis String with which to create a TSpaceToken; moreover - * StoRM uses an Empty TSpaceToken type. - * - * @author EGRID ICTP - * @version 1.0 - * @date June 2005 - */ -class SpaceTokenStringConverter { - - private static SpaceTokenStringConverter stc = new SpaceTokenStringConverter(); - - private SpaceTokenStringConverter() { - - } - - /** - * Method that returns the only instance od SpaceTokenConverter - */ - public static SpaceTokenStringConverter getInstance() { - - return stc; - } - - /** - * Method that translates StoRM Empty TSpaceToken String representation into - * DPM empty representation; all other Strings are left as are. - */ - public String toDB(String s) { - - if (s.equals(TSpaceToken.makeEmpty().toString())) - return ""; - return s; - } - - /** - * Method that translates DPM String representing an Empty TSpaceToken into - * StoRM representation; any other String is left as is. 
- */ - public String toStoRM(String s) { - - if ((s == null) || (s.equals(""))) - return TSpaceToken.makeEmpty().toString(); - return s; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/StoRMDataSource.java b/src/main/java/it/grid/storm/catalogs/StoRMDataSource.java deleted file mode 100644 index 1db3057f5..000000000 --- a/src/main/java/it/grid/storm/catalogs/StoRMDataSource.java +++ /dev/null @@ -1,177 +0,0 @@ -package it.grid.storm.catalogs; - -import it.grid.storm.config.Configuration; - -import java.sql.Connection; -import java.sql.SQLException; - -import javax.sql.DataSource; - -import org.apache.commons.dbcp2.BasicDataSource; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -public class StoRMDataSource { - - public static final Logger log = LoggerFactory - .getLogger(StoRMDataSource.class); - - public static class Builder{ - - private static final String VALIDATION_QUERY = "select 1 from dual"; - - private String driver; - private String url; - - private String username; - private String password; - - private int maxPooledConnections = 200; - private int initialPoolSize = 10; - - private BasicDataSource ds; - - public Builder() { - } - - public Builder driver(String driver){ - this.driver = driver; - return this; - } - - public Builder url(String url){ - this.url = url; - return this; - } - - public Builder username(String username){ - this.username = username; - return this; - } - - public Builder password(String password){ - this.password = password; - return this; - } - - public Builder maxPooledConnections(int maxPool){ - if (maxPool < 1){ - throw new IllegalArgumentException("maxPooledConnections must be >= 1"); - } - this.maxPooledConnections = maxPool; - return this; - } - - public Builder initialPoolSize(int initialSize){ - if (initialSize <= 0){ - throw new IllegalArgumentException("initialSize must be >= 0"); - } - this.initialPoolSize = initialSize; - return this; - } - - private void sanityChecks(){ - if ((username == null) 
|| (username.isEmpty())) - throw new IllegalArgumentException("null or empty username"); - - if ((driver == null) || (driver.isEmpty())) - throw new IllegalArgumentException("null or empty driver"); - - if ((url == null) || (url.isEmpty())) - throw new IllegalArgumentException("null or empty url"); - - if ((password == null) || (password.isEmpty())) - throw new IllegalArgumentException("null or empty password"); - } - - private void logConfiguration(){ - if (log.isDebugEnabled()){ - log.debug("driver: {}", driver); - log.debug("url: {}", url); - log.debug("username: {}", username); - log.debug("password: {}", password); - log.debug("initialPoolSize: {}", initialPoolSize); - log.debug("maxPooledConnections: {}", maxPooledConnections); - } - } - public StoRMDataSource build(){ - sanityChecks(); - logConfiguration(); - ds = new BasicDataSource(); - ds.setDriverClassName(driver); - ds.setUrl(url); - ds.setUsername(username); - ds.setPassword(password); - ds.setInitialSize(initialPoolSize); - ds.setMaxTotal(maxPooledConnections); - ds.setValidationQuery(VALIDATION_QUERY); - ds.setTestWhileIdle(true); - ds.setPoolPreparedStatements(true); - ds.setMaxOpenPreparedStatements(200); - return new StoRMDataSource(this); - } - - } - - private StoRMDataSource(Builder b) { - this.dataSource = b.ds; - } - - private BasicDataSource dataSource; - - - /** - * @return the dataSource - */ - public DataSource getDataSource() { - return dataSource; - } - - - /** - * @throws SQLException - * @see org.apache.commons.dbcp.BasicDataSource#close() - */ - public void close() throws SQLException { - dataSource.close(); - } - - - - /** - * @return - * @throws SQLException - * @see org.apache.commons.dbcp.BasicDataSource#getConnection() - */ - public Connection getConnection() throws SQLException { - return dataSource.getConnection(); - } - - private static volatile StoRMDataSource instance = null; - - public static synchronized StoRMDataSource getInstance(){ - return instance; - } - - public 
static synchronized void init(){ - if (instance != null){ - log.warn("Called init on already initialized Storm data source."); - log.warn("The datasource will be closed and re-initialized."); - try { - instance.close(); - } catch (SQLException e) { - log.error("Error closing storm data source: {}", e.getMessage(), e); - } - } - - log.info("Initializing StoRM datasource"); - Configuration conf = Configuration.getInstance(); - instance = new StoRMDataSource.Builder() - .driver(conf.getDBDriver()) - .url(conf.getStormDbURL()) - .username(conf.getDBUserName()) - .password(conf.getDBPassword()) - .build(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/SurlRequestData.java b/src/main/java/it/grid/storm/catalogs/SurlRequestData.java deleted file mode 100644 index f56079a43..000000000 --- a/src/main/java/it/grid/storm/catalogs/SurlRequestData.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import java.util.Map; - -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * @author Michele Dibenedetto - * - */ -public abstract class SurlRequestData implements RequestData { - - private static final Logger log = LoggerFactory - .getLogger(SurlRequestData.class); - - protected TSURL SURL; - protected TReturnStatus status; - - public SurlRequestData(TSURL toSURL, TReturnStatus status) - throws InvalidSurlRequestDataAttributesException { - - if (toSURL == null || status == null || status.getStatusCode() == null) { - throw new InvalidSurlRequestDataAttributesException(toSURL, status); - } - this.SURL = toSURL; - this.status = status; - } - - /** - * Method that returns the TURL for this chunk of the srm request. - */ - @Override - public final TSURL getSURL() { - - return SURL; - } - - /** - * Method that returns the status for this chunk of the srm request. - */ - @Override - public final TReturnStatus getStatus() { - - return status; - } - - /** - * Method used to set the Status associated to this chunk. If status is null, - * then nothing gets set! - */ - public void setStatus(TReturnStatus status) { - - if (status != null) { - this.status = status; - } - } - - protected void setStatus(TStatusCode statusCode, String explanation) { - - if (explanation == null) { - status = new TReturnStatus(statusCode); - } else { - status = new TReturnStatus(statusCode, explanation); - } - } - - /** - * Method that sets the status of this request to SRM_REQUEST_QUEUED; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. 
- */ - @Override - public final void changeStatusSRM_REQUEST_QUEUED(String explanation) { - - setStatus(TStatusCode.SRM_REQUEST_QUEUED, explanation); - } - - /** - * Method that sets the status of this request to SRM_REQUEST_INPROGRESS; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - @Override - public final void changeStatusSRM_REQUEST_INPROGRESS(String explanation) { - - setStatus(TStatusCode.SRM_REQUEST_INPROGRESS, explanation); - } - - /** - * Method that sets the status of this request to SRM_SUCCESS; it needs the - * explanation String which describes the situation in greater detail; if a - * null is passed, then an empty String is used as explanation. - */ - @Override - public final void changeStatusSRM_SUCCESS(String explanation) { - - setStatus(TStatusCode.SRM_SUCCESS, explanation); - } - - /** - * Method that sets the status of this request to SRM_INTERNAL_ERROR; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. - */ - @Override - public final void changeStatusSRM_INTERNAL_ERROR(String explanation) { - - setStatus(TStatusCode.SRM_INTERNAL_ERROR, explanation); - } - - /** - * Method that sets the status of this request to SRM_INVALID_REQUEST; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - @Override - public final void changeStatusSRM_INVALID_REQUEST(String explanation) { - - setStatus(TStatusCode.SRM_INVALID_REQUEST, explanation); - } - - /** - * Method that sets the status of this request to SRM_AUTHORIZATION_FAILURE; - * it needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. 
- */ - @Override - public final void changeStatusSRM_AUTHORIZATION_FAILURE(String explanation) { - - setStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, explanation); - } - - /** - * Method that sets the status of this request to SRM_ABORTED; it needs the - * explanation String which describes the situation in greater detail; if a - * null is passed, then an empty String is used as explanation. - */ - @Override - public final void changeStatusSRM_ABORTED(String explanation) { - - setStatus(TStatusCode.SRM_ABORTED, explanation); - } - - @Override - public final void changeStatusSRM_FILE_BUSY(String explanation) { - - setStatus(TStatusCode.SRM_FILE_BUSY, explanation); - } - - @Override - public final void changeStatusSRM_INVALID_PATH(String explanation) { - - setStatus(TStatusCode.SRM_INVALID_PATH, explanation); - } - - @Override - public final void changeStatusSRM_NOT_SUPPORTED(String explanation) { - - setStatus(TStatusCode.SRM_NOT_SUPPORTED, explanation); - } - - @Override - public final void changeStatusSRM_FAILURE(String explanation) { - - setStatus(TStatusCode.SRM_FAILURE, explanation); - } - - @Override - public final void changeStatusSRM_SPACE_LIFETIME_EXPIRED(String explanation) { - - setStatus(TStatusCode.SRM_SPACE_LIFETIME_EXPIRED, explanation); - } - - @Override - public String display(Map map) { - - // nonsense method - return ""; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = 1; - result = prime * result + ((SURL == null) ? 0 : SURL.hashCode()); - result = prime * result + ((status == null) ? 
0 : status.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - SurlRequestData other = (SurlRequestData) obj; - if (SURL == null) { - if (other.SURL != null) { - return false; - } - } else if (!SURL.equals(other.SURL)) { - return false; - } - if (status == null) { - if (other.status != null) { - return false; - } - } else if (!status.equals(other.status)) { - return false; - } - return true; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("SurlRequestData [SURL="); - builder.append(SURL); - builder.append(", status="); - builder.append(status); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/TURLConverter.java b/src/main/java/it/grid/storm/catalogs/TURLConverter.java deleted file mode 100644 index c20bece1f..000000000 --- a/src/main/java/it/grid/storm/catalogs/TURLConverter.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TTURL; - -/** - * Class that handles DPM DB representation of a TTURL, in particular it takes - * care of the NULL/EMPTY logic of DPM. Indeed DPM uses 0/null to mean an empty - * field, whereas StoRM uses the type TTURL.makeEmpty(); in particular StoRM - * converts an empty String or a null to an Empty TTURL! - * - * @author EGRID ICTP - * @version 1.0 - * @date March 2006 - */ -public class TURLConverter { - - private static TURLConverter stc = new TURLConverter(); // only instance - - private TURLConverter() { - - } - - /** - * Method that returns the only instance of SizeInBytesIntConverter - */ - public static TURLConverter getInstance() { - - return stc; - } - - /** - * Method that transaltes the Empty TTURL into the empty representation of DPM - * which is a null! Any other String is left as is. - */ - public String toDB(String s) { - - if (s.equals(TTURL.makeEmpty().toString())) - return null; - return s; - } - - /** - * Method that translates DPMs "" or null String as the Empty TTURL String - * representation. Any other String is left as is. - */ - public String toStoRM(String s) { - - if ((s == null) || (s.equals(""))) - return TTURL.makeEmpty().toString(); - return s; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/TransferProtocolListConverter.java b/src/main/java/it/grid/storm/catalogs/TransferProtocolListConverter.java deleted file mode 100644 index 5eb9a5c97..000000000 --- a/src/main/java/it/grid/storm/catalogs/TransferProtocolListConverter.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import java.util.Iterator; -import java.util.List; -import java.util.ArrayList; -import it.grid.storm.namespace.model.Protocol; - -/** - * Package private auxiliary class used to convert between the DB raw data - * representation and StoRM s Object model list of transfer protocols. - * - */ - -class TransferProtocolListConverter { - - /** - * Method that returns a List of Uppercase Strings used in the DB to represent - * the given TURLPrefix. An empty List is returned in case the conversion does - * not succeed, a null TURLPrefix is supplied, or its size is 0. - */ - public static List toDB(TURLPrefix turlPrefix) { - - List result = new ArrayList(); - Protocol protocol; - for (Iterator it = turlPrefix.getDesiredProtocols().iterator(); it - .hasNext();) { - protocol = it.next(); - result.add(protocol.getSchema()); - } - return result; - } - - /** - * Method that returns a TURLPrefix of transfer protocol. If the translation - * cannot take place, a TURLPrefix of size 0 is returned. Likewise if a null - * List is supplied. 
- */ - public static TURLPrefix toSTORM(List listOfProtocol) { - - TURLPrefix turlPrefix = new TURLPrefix(); - Protocol protocol = null; - for (Iterator i = listOfProtocol.iterator(); i.hasNext();) { - protocol = Protocol.getProtocol(i.next()); - if (!(protocol.equals(Protocol.UNKNOWN))) - turlPrefix.addProtocol(protocol); - } - return turlPrefix; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/VolatileAndJiTCatalog.java b/src/main/java/it/grid/storm/catalogs/VolatileAndJiTCatalog.java index 5f2ef76a1..0af59a4e6 100644 --- a/src/main/java/it/grid/storm/catalogs/VolatileAndJiTCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/VolatileAndJiTCatalog.java @@ -17,18 +17,6 @@ package it.grid.storm.catalogs; -import it.grid.storm.acl.AclManager; -import it.grid.storm.acl.AclManagerFS; -import it.grid.storm.common.types.PFN; -import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.config.Configuration; -import it.grid.storm.filesystem.FilesystemPermission; -import it.grid.storm.filesystem.LocalFile; -import it.grid.storm.griduser.LocalUser; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.srm.types.TLifeTimeInSeconds; - -import java.util.ArrayList; import java.util.Calendar; import java.util.Collection; import java.util.Iterator; @@ -39,41 +27,50 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.collect.Lists; + +import it.grid.storm.acl.AclManager; +import it.grid.storm.acl.AclManagerFS; +import it.grid.storm.common.types.PFN; +import it.grid.storm.common.types.TimeUnit; +import it.grid.storm.config.Configuration; +import it.grid.storm.filesystem.FilesystemPermission; +import it.grid.storm.filesystem.LocalFile; +import it.grid.storm.griduser.LocalUser; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.persistence.dao.VolatileAndJiTDAO; +import it.grid.storm.persistence.impl.mysql.VolatileAndJiTDAOMySql; +import it.grid.storm.persistence.model.JiTData; +import 
it.grid.storm.srm.types.TLifeTimeInSeconds; + /** - * This catalog holds all info needed to pin files for JiT ACL tracking, and - * for keeping track of Volatile files. pinLifetime is the time Jit ACLs will be - * in place: upon expiry ACLs are removed; fileLifetime is the time Volatile - * files will remain in the system: upon expiry those files are removed. In - * particular the srmPrepareToPut analyzes the request and if the specified file - * is set to Volatile, then it calls on the catalog to add the corresponding - * entry for the given fileLifetime. If StoRM is configured for JiT, another - * method is invoked to add an entry to keep track of the ACLs for the desired - * pinLifetime. For srmPrepareToGet, only if StoRM is configured for JiT ACLs - * then a method is invoked to add the corresponding entry for the given - * pinLifetime. Repeatedly putting the same Volatile file, will overwrite - * existing fileLifetime only if the overwrite option allows file overwriting. - * If JiT is enabled and it is a new user that is putting again the same file - * in, a new pinLifetime entry is added; but if it is the same user, the - * pinLifetime WILL be changed provided the new expiry exceeds the current one! - * Repeatedly invoking PtG on the same file behaves similarly: different users - * will have their own pinLifetime record, but the same user WILL change the - * pinLifetime provided the new expiry exceeds the current one! In case the - * pinLifetime exceeds the fileLifetime, the fileLifetime is used as ceiling. - * This may occur when a file is Put and defined Volatile, but with a - * pinLifetime that is longer than that of the pin. Or if _subsequent_ calls to - * PtG specify a pinLifetime that lasts longer. To be more precise, the - * pinLifetime gets recorded as requested, but upon expiry of the volatile entry - * any associated acl will get removed as well, regardless of the acl expiry. 
- * When lifetime expires: volatile files get erased from the system and their - * entries in the catalog are removed; tracked ACLs get removed from the files - * WITHOUT erasing the files, and their entries in the catalog are removed; - * finally for Volatile files with ACLs set up on them, the ACLs are removed AND - * the files are erased, also cleaning up the catalog. As a last note, the - * catalog checks periodically its entries for any expired ones, and then - * proceeds with purging; this frequency of cleaning is specified in a - * configuration parameter, and the net effect is that the pinning/volatile may - * actually last longer (but never less) because the self cleaning mechanism is - * active only at those predetermined times. + * This catalog holds all info needed to pin files for JiT ACL tracking, and for keeping track of + * Volatile files. pinLifetime is the time Jit ACLs will be in place: upon expiry ACLs are removed; + * fileLifetime is the time Volatile files will remain in the system: upon expiry those files are + * removed. In particular the srmPrepareToPut analyzes the request and if the specified file is set + * to Volatile, then it calls on the catalog to add the corresponding entry for the given + * fileLifetime. If StoRM is configured for JiT, another method is invoked to add an entry to keep + * track of the ACLs for the desired pinLifetime. For srmPrepareToGet, only if StoRM is configured + * for JiT ACLs then a method is invoked to add the corresponding entry for the given pinLifetime. + * Repeatedly putting the same Volatile file, will overwrite existing fileLifetime only if the + * overwrite option allows file overwriting. If JiT is enabled and it is a new user that is putting + * again the same file in, a new pinLifetime entry is added; but if it is the same user, the + * pinLifetime WILL be changed provided the new expiry exceeds the current one! 
Repeatedly invoking + * PtG on the same file behaves similarly: different users will have their own pinLifetime record, + * but the same user WILL change the pinLifetime provided the new expiry exceeds the current one! In + * case the pinLifetime exceeds the fileLifetime, the fileLifetime is used as ceiling. This may + * occur when a file is Put and defined Volatile, but with a pinLifetime that is longer than that of + * the pin. Or if _subsequent_ calls to PtG specify a pinLifetime that lasts longer. To be more + * precise, the pinLifetime gets recorded as requested, but upon expiry of the volatile entry any + * associated acl will get removed as well, regardless of the acl expiry. When lifetime expires: + * volatile files get erased from the system and their entries in the catalog are removed; tracked + * ACLs get removed from the files WITHOUT erasing the files, and their entries in the catalog are + * removed; finally for Volatile files with ACLs set up on them, the ACLs are removed AND the files + * are erased, also cleaning up the catalog. As a last note, the catalog checks periodically its + * entries for any expired ones, and then proceeds with purging; this frequency of cleaning is + * specified in a configuration parameter, and the net effect is that the pinning/volatile may + * actually last longer (but never less) because the self cleaning mechanism is active only at those + * predetermined times. * * @author EGRID - ICTP Trieste * @version 2.0 @@ -81,536 +78,486 @@ */ public class VolatileAndJiTCatalog { - private static final Logger log = LoggerFactory - .getLogger(VolatileAndJiTCatalog.class); - - /** only instance of Catalog! */ - private static final VolatileAndJiTCatalog cat = new VolatileAndJiTCatalog(); - /** only instance of DAO object! */ - private static final VolatileAndJiTDAO dao = VolatileAndJiTDAO.getInstance(); - /** Timer object in charge of cleaning periodically the Catalog! 
*/ - private final Timer cleaner = new Timer(); - /** Delay time before starting cleaning thread! Set to 1 minute */ - private final long delay = Configuration.getInstance() - .getCleaningInitialDelay() * 1000; - /** Period of execution of cleaning! Set to 1 hour */ - private final long period = Configuration.getInstance() - .getCleaningTimeInterval() * 1000; - /** fileLifetime to use if user specified a non-positive value */ - private final long defaultFileLifetime = Configuration.getInstance() - .getFileLifetimeDefault(); - /** Number of seconds to use as default if the supplied lifetime is zero! */ - private final long floor = Configuration.getInstance() - .getPinLifetimeDefault(); - /** - * Maximum number of seconds that an ACL can live: the life time requested by - * the user cannot be greater than this value! This ceiling is needed because - * of the cron job that removes pool account mappings: when the mapping is - * removed, there must NOT be ANY ACL for that pool-user left! - */ - private final long ceiling = Configuration.getInstance() - .getPinLifetimeMaximum(); - - /** - * Private constructor that starts the cleaning timer. - */ - private VolatileAndJiTCatalog() { - - TimerTask cleaningTask = new TimerTask() { - - @Override - public void run() { - - purge(); - } - }; - cleaner.scheduleAtFixedRate(cleaningTask, delay, period); - } - - /** - * Method that returns the only instance of PinnedFilesCatalog. - */ - public static VolatileAndJiTCatalog getInstance() { - - return cat; - } - - /** - * Checks whether the given file exists in the volatile table or not. - * - * @param filename - * @return true if there is antry for the given file in the - * volatilte table, false otherwise. - */ - synchronized public boolean exists(PFN pfn) { - - return dao.exists(pfn.getValue()); - } - - /** - * Method used to expire _all_ related entries in the JiT catalogue, that were - * setup during a PtG operation. 
The method is intended to be used by code - * handling srmAbort command. Notice that the Traverse on the parents is NOT - * removed! This is to accomodate for the use case of a user that has run many - * PtG on different SURLs but all contained in the same directory tree! In - * practice this method removes the R permission. If any entry does not exist, - * then nothing happens and a warning gets written in the logs; otherwise - * entries get their start time set to now, and the lifetime set to zero; in - * case more than one matching entry is found, a message gets written to the - * logs, and the updating continues anyway as explained. At this point, when - * the garbage collector wakes up the entries get cleanly handled (physical - * ACL is removed, catalog entry removed, etc.); or an earlier cleaning can be - * forced by invoking directly the purge mehod. The method returns FALSE in - * case an entry was not found or the supplied parameters were null, and TRUE - * otherwise. Yet keep in mind that it says nothing of whether the DB - * operation was successful or not. - */ - synchronized public boolean expireGetJiTs(PFN pfn, LocalUser localUser) { - - if (pfn != null && localUser != null) { - return expireJiT(pfn, localUser, FilesystemPermission.Read); - } - log.error("VolatileAndJiT CATALOG: programming bug! expireGetJiTs invoked " - + "on null attributes; pfn={} localUser={}", pfn, localUser); - return false; - } - - /** - * Method used to expire an entry in the JiT catalogue. The method is intended - * to be used by code handling srmAbort command. If the entry does not exist, - * then nothing happens and a warning gets written in the logs; otherwise the - * entry gets its start time set to now, and its lifetime set to zero; in case - * more than one matching entry is found, a message gets written to the logs, - * and the updating continues anyway as explained. 
At this point, when the - * garbage collector wakes up the entry is cleanly handled (physical ACL is - * removed, catalog entry removed, etc.); or an earlier cleaning can be forced - * by invoking directly the purge method. The method returns FALSE in case no - * entry was found or the supplied parameters were null, and TRUE otherwise. - * Yet keep in mind that is says nothing of whether the DB operation was - * successful or not. - */ - synchronized public boolean expireJiT(PFN pfn, LocalUser localUser, - FilesystemPermission acl) { - - if (pfn != null && localUser != null && acl != null) { - String fileName = pfn.getValue(); - int uid = localUser.getUid(); - int intacl = acl.getInt(); - // from the current time we remove 10 seconds because it was observed - // that when executing purge right after invoking this method, less - // than 1 second elapses, so no purging takes place at all since expiry - // is not yet reached! - // Seconds needed and not milliseconds! - long pinStart = (Calendar.getInstance().getTimeInMillis() / 1000) - 10; - long pinTime = 0; // set to zero the lifetime! - int n = dao.numberJiT(fileName, uid, intacl); - if (n == 0) { - log.warn("VolatileAndJiT CATALOG: expireJiT found no entry for ({}, {}, " - + "{})!", fileName, uid, intacl); - return false; - } - dao.forceUpdateJiT(fileName, uid, intacl, pinStart, pinTime); - if (n > 1) { - log.warn("VolatileAndJiT CATALOG: expireJiT found more than one entry " - + "for ({}, {}, {}); the catalogue could be corrupt!", fileName, uid, intacl); - } - return true; - } - log.error("VolatileAndJiT CATALOG: programming bug! expireJiT invoked on " - + "null attributes; pfn={} localUser={} acl={}", pfn, localUser, acl); - return false; - } - - /** - * Method used to expire _all_ related entries in the JiT catalogue, that were - * setup during a PtP operation. The method is intended to be used by code - * handling srmAbort command, and by srmPutDone. Notice that the Traverse on - * the parents is NOT removed! 
This is to accomodate for the use case of a - * user that has run many PtP on different SURLs but that are all contained in - * the same directory tree! In practice, this method removes R and W - * permissions. If any entry does not exist, then nothing happens and a - * warning gets written in the logs; otherwise entries get their start time - * set to now, and the lifetime set to zero; in case more than one matching - * entry is found, a message gets written to the logs, and the updating - * continues anyway as explained. At this point, when the garbage collector - * wakes up the entries get cleanly handled (physical ACL is removed, catalog - * entry removed, etc.); or an earlier cleaning can be forced by invoking - * directly the purge mehod. The method returns FALSE in case an entry was not - * found or the supplied parameters were null, and TRUE otherwise. Yet keep in - * mind that is says nothing of whether the DB operation was successful or - * not. - */ - synchronized public boolean expirePutJiTs(PFN pfn, LocalUser localUser) { - - if (pfn != null && localUser != null) { - return expireJiT(pfn, localUser, FilesystemPermission.Read) - && expireJiT(pfn, localUser, FilesystemPermission.Write); - } - - log.error("VolatileAndJiT CATALOG: programming bug! expirePutJiTs invoked " - + "on null attributes; pfn={} localUser={}", pfn, localUser); - return false; - } - - /** - * Method that purges the catalog, removing expired ACLs and deleting expired - * Volatile files. When Volatile entries expire, any realted JiT will - * automatically expire too, regardless of the specified pinLifetime: that is, - * fileLifetime wins over pinLifetime. WARNING! Notice that the catalogue DOES - * get cleaned up even if the physical removal of the ACL or erasing of the - * file fails. - */ - public synchronized void purge() { - - log.debug("VolatileAndJiT CATALOG! 
Executing purge!"); - Calendar rightNow = Calendar.getInstance(); - /** - * removes all expired entries from storm_pin and storm_track, returning two - * Collections: one with the PFN of Volatile files, and the other with PFN + - * GridUser couple of the entries that were just being tracked for the ACLs - * set up on them. - */ - Collection[] expired = dao.removeExpired(rightNow.getTimeInMillis() / 1000); - Collection expiredVolatile = expired[0]; - Collection expiredJiT = expired[1]; - if (expiredVolatile.size() == 0) { - log.debug("VolatileAndJiT CATALOG! No expired Volatile entries found."); - } else { - log.info("VolatileAndJiT CATALOG! Found and purged the following expired " - + "Volatile entries:\n {}", volatileString(expired[0])); - } - if (expiredJiT.size() == 0) { - log.debug("VolatileAndJiT CATALOG! No JiT entries found."); - } else { - log.info("VolatileAndJiT CATALOG! Found and purged the following expired " - + "JiT ACLs entries:\n {}", jitString(expired[1])); - } - // Remove ACLs - JiTData aux = null; - for (Iterator i = expiredJiT.iterator(); i.hasNext();) { - aux = (JiTData) i.next(); - int jitacl = aux.acl(); - String jitfile = aux.pfn(); - int jituid = aux.uid(); - int jitgid = aux.gid(); - try { - log.info("VolatileAndJiT CATALOG. Removing ACL {} on file {} for " - + "user {},{}", jitacl, jitfile, jituid, jitgid); - LocalFile auxFile = NamespaceDirector.getNamespace() - .resolveStoRIbyPFN(PFN.make(jitfile)).getLocalFile(); - LocalUser auxUser = new LocalUser(jituid, jitgid); - FilesystemPermission auxACL = new FilesystemPermission(jitacl); - - AclManager manager = AclManagerFS.getInstance(); - if (auxFile == null) { - log.warn("VolatileAndJiT CATALOG! Unable to setting up the ACL. " - + "LocalFile is null!"); - } else { - try { - manager.revokeUserPermission(auxFile, auxUser, auxACL); - } catch (IllegalArgumentException e) { - log.error("Unable to revoke user permissions on the file. 
" - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } - } catch (Exception e) { - log.error("VolatileAndJiT CATALOG! Entry removed from Catalog, but " - + "physical ACL {} for user {}, could NOT be removed from {}", - jitacl, jituid, jitgid, jitfile); - log.error("VolatileAndJiT CATALOG! {}", e.getMessage(), e); - } - } - // Delete files - String auxPFN = null; - for (Iterator i = expiredVolatile.iterator(); i.hasNext();) { - auxPFN = (String) i.next(); - try { - log.info("VolatileAndJiT CATALOG. Deleting file {}", auxPFN); - LocalFile auxFile = NamespaceDirector.getNamespace() - .resolveStoRIbyPFN(PFN.make(auxPFN)).getLocalFile(); - boolean ok = auxFile.delete(); - if (!ok) { - throw new Exception("Java File deletion failed!"); - } - } catch (Exception e) { - log.error("VolatileAndJiT CATALOG! Entry removed from Catalog, but " - + "physical file {} could NOT be deleted!", auxPFN); - log.error("VolatileAndJiT CATALOG! {}", e.getMessage(), e); - } - } - } - - /** - * Method used upon expiry of SRM_SPACE_AVAILABLE to remove all JiT entries in - * the DB table, related to the given PFN; Notice that _no_ distinction is - * made aboutthe specific user! This is because upon expiry of - * SRM_SPACE_AVAILABLE the file gets erased, so all JiTs on that file are - * automatically erased. This implies that all catalogue entries get removed. - * If no entries are present nothing happens. - */ - public synchronized void removeAllJiTsOn(PFN pfn) { - - if (pfn != null) { - dao.removeAllJiTsOn(pfn.getValue()); - return; - } - log.error("VolatileAndJiT CATALOG: programming bug! removeAllJiTsOn " - + "invoked on null pfn!"); - } - - /** - * Method used to remove a Volatile entry that matches the supplied pfn, from - * the DB. If null is supplied, an error message gets logged and nothing - * happens. If PFN is not found, nothing happens and _no_ message gets logged. 
- */ - public synchronized void removeVolatile(PFN pfn) { - - if (pfn != null) { - dao.removeVolatile(pfn.getValue()); - return; - } - log.warn("VolatileAndJiT CATALOG: programming bug! removeVolatile invoked " - + "on null pfn!"); - } - - /** - * Method used to keep track of an ACL set up on a PFN; it needs the PFN, the - * LocalUser, the ACL and the desired pinLifeTime. If the 3-ple (PFN, ACL, - * LocalUser) is not present, it gets added; if it is already present, - * provided the new desired expiry occurs after the present one, it gets - * changed. If the supplied lifetime is zero, then a default value is used - * instead. If it is larger than a ceiling, that ceiling is used instead. The - * floor value in seconds can be set from the configuration file, with the - * property: pinLifetime.minimum While the ceiling value in seconds is set - * with: pinLifetime.maximum BEWARE: The intended use case is in both - * srmPrepareToGet and srmPrepareToPut, for the case of the _JiT_ security - * mechanism. The maximum is necessary because JiT ACLs cannot last longer - * than the amount of time the pool account is leased. Notice that for - * Volatile entries, a pinLifetime larger than the fileLifetime can be - * specified. However, when Volatile files expire any related JiTs - * automatically expire in anticipation! - */ - public synchronized void trackJiT(PFN pfn, LocalUser localUser, - FilesystemPermission acl, Calendar start, TLifeTimeInSeconds pinLifetime) { - - if (pfn != null && localUser != null && acl != null && start != null - && pinLifetime != null) { - - String fileName = pfn.getValue(); - int uid = localUser.getUid(); - int gid = localUser.getPrimaryGid(); - int intacl = acl.getInt(); - // seconds needed and not milliseconds! 
- long pinStart = start.getTimeInMillis() / 1000; - long pinTime = validatePinLifetime(pinLifetime.value()); - int n = dao.numberJiT(fileName, uid, intacl); - if (n == 0) { - dao.addJiT(fileName, uid, gid, intacl, pinStart, pinTime); - } else { - dao.updateJiT(fileName, uid, intacl, pinStart, pinTime); - if (n > 1) { - log.warn("VolatileAndJiT CATALOG: More than one entry found for " - + "({}, {}, {}); the catalogue could be corrupt!", fileName, uid, - intacl); - } - } - return; - } - log.error("VolatileAndJiT CATALOG: programming bug! TrackACL invoked on " - + "null attributes; pfn={} localUser={} acl={} start={} pinLifetime={}", - pfn, localUser, acl, start, pinLifetime); - } - - /** - * Method that adds an entry to the catalog that keeps track of Volatile - * files. The PFN and the fileLifetime are needed. If no entry corresponding - * to the given PFN is found, a new one gets recorded. If the PFN is already - * present, then provided the new expiry (obtained by adding together - * current-time and requested-lifetime) exceeds the expiry in the catalog, - * the entry is updated. Otherwise nothing takes place. If the supplied - * fileLifetime is zero, then a default value is used instead. This floor - * default value in seconds can be set from the configuration file, with the - * property: fileLifetime.default BEWARE: The intended use case for this - * method is during srmPrepareToPut. When files are uploaded into StoRM, they - * get specified as Volatile or Permanent. The PtP logic determines if the - * request is for a Volatile file and in that case it adds a new entry in the - * catalog. That is the purpose of this method. Any subsequent PtP call will - * just result in a modification of the expiry, provided the newer one lasts - * longer than the original one. Yet bear in mind that two or more PtP on the - * same file makes NO SENSE AT ALL! If any DB error occurs, then nothing gets - * added/updated and an error message gets logged. 
- */ - public synchronized void trackVolatile(PFN pfn, Calendar start, - TLifeTimeInSeconds fileLifetime) { - - if (pfn != null && fileLifetime != null && start != null) { - - String fileName = pfn.getValue(); - long fileTime = fileLifetime.value(); - if (fileTime <= 0) { - fileTime = defaultFileLifetime; - } - long fileStart = start.getTimeInMillis() / 1000; // seconds needed and not - // milliseconds! - int n = dao.numberVolatile(fileName); - if (n == -1) { - log.error("VolatileAndJiT CATALOG! DB problem does not allow to count " - + "number of Volatile entries for {}! Volatile entry NOT processed!", - pfn); - } else if (n == 0) { - dao.addVolatile(fileName, fileStart, fileTime); - } else { - dao.updateVolatile(fileName, fileStart, fileTime); - if (n > 1) { - log.warn("VolatileAndJiT CATALOG: More than one entry found for {}; " - + "the catalogue could be corrupt!", fileName); - } - } - return; - } - log.warn("VolatileAndJiT CATALOG: programming bug! volatileEntry invoked " - + "on null attributes; pfn={} start={} fileLifetime={}", pfn, start, - fileLifetime); - } - - public synchronized void setStartTime(PFN pfn, Calendar start) - throws Exception { - - if (pfn == null || start == null) { - log.warn("VolatileAndJiT CATALOG: programming bug! volatileEntry invoked " - + "on null attributes; pfn={} start={}", pfn, start); - return; - } - - String fileName = pfn.getValue(); - // seconds needed and not milliseconds! - long fileStart = start.getTimeInMillis() / 1000; - int n = dao.numberVolatile(fileName); - if (n == -1) { - log.error("VolatileAndJiT CATALOG! DB problem does not allow to count " - + "number of Volatile entries for {}! 
Volatile entry NOT processed!", - pfn); - return; - } - if (n == 0) { - throw new Exception("Unable to update row volatile for pfn \'" + pfn - + "\' , not on the database!"); - } - dao.updateVolatile(fileName, fileStart); - if (n > 1) { - log.warn("VolatileAndJiT CATALOG: More than one entry found for {}; " - + "the catalogue could be corrupt!", fileName); - } - } - - /** - * Method that returns a List whose first element is a Calendar with the - * starting date and time of the lifetime of the supplied PFN, and whose - * second element is the TLifeTime the system is keeping the PFN. If no entry - * is found for the given PFN, an empty List is returned. Likewise if any DB - * error occurs. In any case, proper error messages get logged. Moreover - * notice that if for any reason the value for the Lifetime read from the DB - * does not allow creation of a valid TLifeTimeInSeconds, an Empty one is - * returned. Error messages in logs warn of the situation. - */ - public synchronized List volatileInfoOn(PFN pfn) { - - ArrayList aux = new ArrayList(); - if (pfn == null) { - log.error("VolatileAndJiT CATALOG: programming bug! volatileInfoOn " - + "invoked on null PFN!"); - return aux; - } - Collection c = dao.volatileInfoOn(pfn.getValue()); - if (c.size() != 2) { - return aux; - } - Iterator i = c.iterator(); - // start time - long startInMillis = i.next().longValue() * 1000; - Calendar auxcal = Calendar.getInstance(); - auxcal.setTimeInMillis(startInMillis); - aux.add(auxcal); - // lifeTime - long lifetimeInSeconds = ((Long) i.next()).longValue(); - TLifeTimeInSeconds auxLifeTime = TLifeTimeInSeconds.makeEmpty(); - try { - auxLifeTime = TLifeTimeInSeconds - .make(lifetimeInSeconds, TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - log.error("VolatileAndJiT CATALOG: programming bug! Retrieved long does " - + "not allow TLifeTimeCreation! 
long is: {}; error is: {}", - lifetimeInSeconds, e.getMessage(), e); - } - aux.add(auxLifeTime); - return aux; - } - - /** - * Private method used to return a String representation of the expired - * entries Collection of JiTData. - */ - private String jitString(Collection c) { - - if (c == null) { - return ""; - } - StringBuilder sb = new StringBuilder(); - sb.append("file,acl,uid,gid\n"); - JiTData aux = null; - for (Iterator i = c.iterator(); i.hasNext();) { - aux = i.next(); - sb.append(aux.pfn()); - sb.append(","); - sb.append(aux.acl()); - sb.append(","); - sb.append(aux.uid()); - sb.append(","); - sb.append(aux.gid()); - if (i.hasNext()) { - sb.append("\n"); - } - } - return sb.toString(); - } - - /** - * Private method that makes sure that the lifeTime of the request: (1) It is - * not less than a predetermined value: this check is needed because clients - * may omit to supply a value and some default one must be used; moreover, it - * is feared that if the requested lifetime is very low, such as 0 or a few - * seconds, there could be strange problems in having a file written and - * erased immediately. (2) It is not larger than a given ceiling; this is - * necessary because in the JiT model, the underlying system may decide to - * remove the pool account mappings; it is paramount that no ACLs remain set - * up for the now un-associated pool account. - */ - private long validatePinLifetime(long lifetime) { - - long duration = lifetime < floor ? floor : lifetime; // adjust for lifetime - // set to zero! - duration = duration <= ceiling ? duration : ceiling; // make sure lifetime - // is not longer than - // the maximum set! - return duration; - } - - /** - * Private method used to return a String representation of the expired - * entries Collection of pfn Strings. 
- */ - private String volatileString(Collection c) { - - if (c == null) { - return ""; - } - StringBuilder sb = new StringBuilder(); - for (Iterator i = c.iterator(); i.hasNext();) { - sb.append(i.next()); - if (i.hasNext()) { - sb.append(","); - } - } - return sb.toString(); - } + private static final Logger log = LoggerFactory.getLogger(VolatileAndJiTCatalog.class); + + private static VolatileAndJiTCatalog instance; + + public static synchronized VolatileAndJiTCatalog getInstance() { + if (instance == null) { + instance = new VolatileAndJiTCatalog(); + } + return instance; + } + + private final VolatileAndJiTDAO dao; + + /** Timer object in charge of cleaning periodically the Catalog! */ + private final Timer cleaner = new Timer(); + /** Delay time before starting cleaning thread! Set to 1 minute */ + private final long delay = Configuration.getInstance().getExpiredSpacesAgentInitialDelay() * 1000; + /** Period of execution of cleaning! Set to 1 hour */ + private final long period = Configuration.getInstance().getExpiredSpacesAgentInterval() * 1000; + /** fileLifetime to use if user specified a non-positive value */ + private final long defaultFileLifetime = Configuration.getInstance().getFileLifetimeDefault(); + /** Number of seconds to use as default if the supplied lifetime is zero! */ + private final long floor = Configuration.getInstance().getPinLifetimeDefault(); + /** + * Maximum number of seconds that an ACL can live: the life time requested by the user cannot be + * greater than this value! This ceiling is needed because of the cron job that removes pool + * account mappings: when the mapping is removed, there must NOT be ANY ACL for that pool-user + * left! + */ + private final long ceiling = Configuration.getInstance().getPinLifetimeMaximum(); + + /** + * Private constructor that starts the cleaning timer. 
+ */ + private VolatileAndJiTCatalog() { + + dao = VolatileAndJiTDAOMySql.getInstance(); + + TimerTask cleaningTask = new TimerTask() { + + @Override + public void run() { + + purge(); + } + }; + cleaner.scheduleAtFixedRate(cleaningTask, delay, period); + } + + /** + * Checks whether the given file exists in the volatile table or not. + * + * @param filename + * @return true if there is antry for the given file in the volatilte table, + * false otherwise. + */ + synchronized public boolean exists(PFN pfn) { + + return dao.exists(pfn.getValue()); + } + + /** + * Method used to expire _all_ related entries in the JiT catalogue, that were setup during a PtG + * operation. The method is intended to be used by code handling srmAbort command. Notice that the + * Traverse on the parents is NOT removed! This is to accomodate for the use case of a user that + * has run many PtG on different SURLs but all contained in the same directory tree! In practice + * this method removes the R permission. If any entry does not exist, then nothing happens and a + * warning gets written in the logs; otherwise entries get their start time set to now, and the + * lifetime set to zero; in case more than one matching entry is found, a message gets written to + * the logs, and the updating continues anyway as explained. At this point, when the garbage + * collector wakes up the entries get cleanly handled (physical ACL is removed, catalog entry + * removed, etc.); or an earlier cleaning can be forced by invoking directly the purge mehod. The + * method returns FALSE in case an entry was not found or the supplied parameters were null, and + * TRUE otherwise. Yet keep in mind that it says nothing of whether the DB operation was + * successful or not. + */ + synchronized public boolean expireGetJiTs(PFN pfn, LocalUser localUser) { + + if (pfn != null && localUser != null) { + return expireJiT(pfn, localUser, FilesystemPermission.Read); + } + log.error("VolatileAndJiT CATALOG: programming bug! 
expireGetJiTs invoked " + + "on null attributes; pfn={} localUser={}", pfn, localUser); + return false; + } + + /** + * Method used to expire an entry in the JiT catalogue. The method is intended to be used by code + * handling srmAbort command. If the entry does not exist, then nothing happens and a warning gets + * written in the logs; otherwise the entry gets its start time set to now, and its lifetime set + * to zero; in case more than one matching entry is found, a message gets written to the logs, and + * the updating continues anyway as explained. At this point, when the garbage collector wakes up + * the entry is cleanly handled (physical ACL is removed, catalog entry removed, etc.); or an + * earlier cleaning can be forced by invoking directly the purge method. The method returns FALSE + * in case no entry was found or the supplied parameters were null, and TRUE otherwise. Yet keep + * in mind that is says nothing of whether the DB operation was successful or not. + */ + synchronized public boolean expireJiT(PFN pfn, LocalUser localUser, FilesystemPermission acl) { + + if (pfn != null && localUser != null && acl != null) { + String fileName = pfn.getValue(); + int uid = localUser.getUid(); + int intacl = acl.getInt(); + // from the current time we remove 10 seconds because it was observed + // that when executing purge right after invoking this method, less + // than 1 second elapses, so no purging takes place at all since expiry + // is not yet reached! + // Seconds needed and not milliseconds! + long pinStart = (Calendar.getInstance().getTimeInMillis() / 1000) - 10; + long pinTime = 0; // set to zero the lifetime! 
+ int n = dao.numberJiT(fileName, uid, intacl); + if (n == 0) { + log.warn("VolatileAndJiT CATALOG: expireJiT found no entry for ({}, {}, " + "{})!", + fileName, uid, intacl); + return false; + } + dao.forceUpdateJiT(fileName, uid, intacl, pinStart, pinTime); + if (n > 1) { + log.warn("VolatileAndJiT CATALOG: expireJiT found more than one entry " + + "for ({}, {}, {}); the catalogue could be corrupt!", fileName, uid, intacl); + } + return true; + } + log.error("VolatileAndJiT CATALOG: programming bug! expireJiT invoked on " + + "null attributes; pfn={} localUser={} acl={}", pfn, localUser, acl); + return false; + } + + /** + * Method used to expire _all_ related entries in the JiT catalogue, that were setup during a PtP + * operation. The method is intended to be used by code handling srmAbort command, and by + * srmPutDone. Notice that the Traverse on the parents is NOT removed! This is to accomodate for + * the use case of a user that has run many PtP on different SURLs but that are all contained in + * the same directory tree! In practice, this method removes R and W permissions. If any entry + * does not exist, then nothing happens and a warning gets written in the logs; otherwise entries + * get their start time set to now, and the lifetime set to zero; in case more than one matching + * entry is found, a message gets written to the logs, and the updating continues anyway as + * explained. At this point, when the garbage collector wakes up the entries get cleanly handled + * (physical ACL is removed, catalog entry removed, etc.); or an earlier cleaning can be forced by + * invoking directly the purge mehod. The method returns FALSE in case an entry was not found or + * the supplied parameters were null, and TRUE otherwise. Yet keep in mind that is says nothing of + * whether the DB operation was successful or not. 
+ */ + synchronized public boolean expirePutJiTs(PFN pfn, LocalUser localUser) { + + if (pfn != null && localUser != null) { + return expireJiT(pfn, localUser, FilesystemPermission.Read) + && expireJiT(pfn, localUser, FilesystemPermission.Write); + } + + log.error("VolatileAndJiT CATALOG: programming bug! expirePutJiTs invoked " + + "on null attributes; pfn={} localUser={}", pfn, localUser); + return false; + } + + /** + * Method that purges the catalog, removing expired ACLs and deleting expired Volatile files. When + * Volatile entries expire, any realted JiT will automatically expire too, regardless of the + * specified pinLifetime: that is, fileLifetime wins over pinLifetime. WARNING! Notice that the + * catalogue DOES get cleaned up even if the physical removal of the ACL or erasing of the file + * fails. + */ + @SuppressWarnings("unchecked") + public synchronized void purge() { + + log.debug("VolatileAndJiT CATALOG! Executing purge!"); + Calendar rightNow = Calendar.getInstance(); + /** + * removes all expired entries from storm_pin and storm_track, returning two Collections: one + * with the PFN of Volatile files, and the other with PFN + GridUser couple of the entries that + * were just being tracked for the ACLs set up on them. + */ + List expired = dao.removeExpired(rightNow.getTimeInMillis() / 1000); + List expiredVolatile = (List) expired.get(0); + List expiredJiT = (List) expired.get(1); + if (expiredVolatile.size() == 0) { + log.debug("VolatileAndJiT CATALOG! No expired Volatile entries found."); + } else { + log.info("VolatileAndJiT CATALOG! Found and purged the following expired " + + "Volatile entries:\n {}", volatileString(expiredVolatile)); + } + if (expiredJiT.size() == 0) { + log.debug("VolatileAndJiT CATALOG! No JiT entries found."); + } else { + log.info("VolatileAndJiT CATALOG! 
Found and purged the following expired " + + "JiT ACLs entries:\n {}", jitString(expiredJiT)); + } + // Remove ACLs + JiTData aux = null; + for (Iterator i = expiredJiT.iterator(); i.hasNext();) { + aux = (JiTData) i.next(); + int jitacl = aux.acl(); + String jitfile = aux.pfn(); + int jituid = aux.uid(); + int jitgid = aux.gid(); + try { + log.info("VolatileAndJiT CATALOG. Removing ACL {} on file {} for " + "user {},{}", jitacl, + jitfile, jituid, jitgid); + LocalFile auxFile = + Namespace.getInstance().resolveStoRIbyPFN(PFN.make(jitfile)).getLocalFile(); + LocalUser auxUser = new LocalUser(jituid, jitgid); + FilesystemPermission auxACL = new FilesystemPermission(jitacl); + + AclManager manager = AclManagerFS.getInstance(); + if (auxFile == null) { + log.warn("VolatileAndJiT CATALOG! Unable to setting up the ACL. " + "LocalFile is null!"); + } else { + try { + manager.revokeUserPermission(auxFile, auxUser, auxACL); + } catch (IllegalArgumentException e) { + log.error( + "Unable to revoke user permissions on the file. " + "IllegalArgumentException: {}", + e.getMessage(), e); + } + } + } catch (Exception e) { + log.error( + "VolatileAndJiT CATALOG! Entry removed from Catalog, but " + + "physical ACL {} for user {}, could NOT be removed from {}", + jitacl, jituid, jitgid, jitfile); + log.error("VolatileAndJiT CATALOG! {}", e.getMessage(), e); + } + } + // Delete files + String auxPFN = null; + for (Iterator i = expiredVolatile.iterator(); i.hasNext();) { + auxPFN = (String) i.next(); + try { + log.info("VolatileAndJiT CATALOG. Deleting file {}", auxPFN); + LocalFile auxFile = + Namespace.getInstance().resolveStoRIbyPFN(PFN.make(auxPFN)).getLocalFile(); + boolean ok = auxFile.delete(); + if (!ok) { + throw new Exception("Java File deletion failed!"); + } + } catch (Exception e) { + log.error("VolatileAndJiT CATALOG! Entry removed from Catalog, but " + + "physical file {} could NOT be deleted!", auxPFN); + log.error("VolatileAndJiT CATALOG! 
{}", e.getMessage(), e); + } + } + } + + /** + * Method used upon expiry of SRM_SPACE_AVAILABLE to remove all JiT entries in the DB table, + * related to the given PFN; Notice that _no_ distinction is made aboutthe specific user! This is + * because upon expiry of SRM_SPACE_AVAILABLE the file gets erased, so all JiTs on that file are + * automatically erased. This implies that all catalogue entries get removed. If no entries are + * present nothing happens. + */ + public synchronized void removeAllJiTsOn(PFN pfn) { + + if (pfn != null) { + dao.removeAllJiTsOn(pfn.getValue()); + return; + } + log.error("VolatileAndJiT CATALOG: programming bug! removeAllJiTsOn " + "invoked on null pfn!"); + } + + /** + * Method used to keep track of an ACL set up on a PFN; it needs the PFN, the LocalUser, the ACL + * and the desired pinLifeTime. If the 3-ple (PFN, ACL, LocalUser) is not present, it gets added; + * if it is already present, provided the new desired expiry occurs after the present one, it gets + * changed. If the supplied lifetime is zero, then a default value is used instead. If it is + * larger than a ceiling, that ceiling is used instead. The floor value in seconds can be set from + * the configuration file, with the property: pinLifetime.minimum While the ceiling value in + * seconds is set with: pinLifetime.maximum BEWARE: The intended use case is in both + * srmPrepareToGet and srmPrepareToPut, for the case of the _JiT_ security mechanism. The maximum + * is necessary because JiT ACLs cannot last longer than the amount of time the pool account is + * leased. Notice that for Volatile entries, a pinLifetime larger than the fileLifetime can be + * specified. However, when Volatile files expire any related JiTs automatically expire in + * anticipation! 
+ */ + public synchronized void trackJiT(PFN pfn, LocalUser localUser, FilesystemPermission acl, + Calendar start, TLifeTimeInSeconds pinLifetime) { + + if (pfn != null && localUser != null && acl != null && start != null && pinLifetime != null) { + + String fileName = pfn.getValue(); + int uid = localUser.getUid(); + int gid = localUser.getPrimaryGid(); + int intacl = acl.getInt(); + // seconds needed and not milliseconds! + long pinStart = start.getTimeInMillis() / 1000; + long pinTime = validatePinLifetime(pinLifetime.value()); + int n = dao.numberJiT(fileName, uid, intacl); + if (n == 0) { + dao.addJiT(fileName, uid, gid, intacl, pinStart, pinTime); + } else { + dao.updateJiT(fileName, uid, intacl, pinStart, pinTime); + if (n > 1) { + log.warn("VolatileAndJiT CATALOG: More than one entry found for " + + "({}, {}, {}); the catalogue could be corrupt!", fileName, uid, intacl); + } + } + return; + } + log.error( + "VolatileAndJiT CATALOG: programming bug! TrackACL invoked on " + + "null attributes; pfn={} localUser={} acl={} start={} pinLifetime={}", + pfn, localUser, acl, start, pinLifetime); + } + + /** + * Method that adds an entry to the catalog that keeps track of Volatile files. The PFN and the + * fileLifetime are needed. If no entry corresponding to the given PFN is found, a new one gets + * recorded. If the PFN is already present, then provided the new expiry (obtained by adding + * together current-time and requested-lifetime) exceeds the expiry in the catalog, the entry is + * updated. Otherwise nothing takes place. If the supplied fileLifetime is zero, then a default + * value is used instead. This floor default value in seconds can be set from the configuration + * file, with the property: fileLifetime.default BEWARE: The intended use case for this method is + * during srmPrepareToPut. When files are uploaded into StoRM, they get specified as Volatile or + * Permanent. 
The PtP logic determines if the request is for a Volatile file and in that case it + * adds a new entry in the catalog. That is the purpose of this method. Any subsequent PtP call + * will just result in a modification of the expiry, provided the newer one lasts longer than the + * original one. Yet bear in mind that two or more PtP on the same file makes NO SENSE AT ALL! If + * any DB error occurs, then nothing gets added/updated and an error message gets logged. + */ + public synchronized void trackVolatile(PFN pfn, Calendar start, TLifeTimeInSeconds fileLifetime) { + + if (pfn != null && fileLifetime != null && start != null) { + + String fileName = pfn.getValue(); + long fileTime = fileLifetime.value(); + if (fileTime <= 0) { + fileTime = defaultFileLifetime; + } + long fileStart = start.getTimeInMillis() / 1000; // seconds needed and not + // milliseconds! + int n = dao.numberVolatile(fileName); + if (n == -1) { + log.error("VolatileAndJiT CATALOG! DB problem does not allow to count " + + "number of Volatile entries for {}! Volatile entry NOT processed!", pfn); + } else if (n == 0) { + dao.addVolatile(fileName, fileStart, fileTime); + } else { + dao.updateVolatile(fileName, fileStart, fileTime); + if (n > 1) { + log.warn("VolatileAndJiT CATALOG: More than one entry found for {}; " + + "the catalogue could be corrupt!", fileName); + } + } + return; + } + log.warn("VolatileAndJiT CATALOG: programming bug! volatileEntry invoked " + + "on null attributes; pfn={} start={} fileLifetime={}", pfn, start, fileLifetime); + } + + public synchronized void setStartTime(PFN pfn, Calendar start) throws Exception { + + if (pfn == null || start == null) { + log.warn("VolatileAndJiT CATALOG: programming bug! volatileEntry invoked " + + "on null attributes; pfn={} start={}", pfn, start); + return; + } + + String fileName = pfn.getValue(); + // seconds needed and not milliseconds! 
+ long fileStart = start.getTimeInMillis() / 1000; + int n = dao.numberVolatile(fileName); + if (n == -1) { + log.error("VolatileAndJiT CATALOG! DB problem does not allow to count " + + "number of Volatile entries for {}! Volatile entry NOT processed!", pfn); + return; + } + if (n == 0) { + throw new Exception( + "Unable to update row volatile for pfn \'" + pfn + "\' , not on the database!"); + } + dao.updateVolatile(fileName, fileStart); + if (n > 1) { + log.warn("VolatileAndJiT CATALOG: More than one entry found for {}; " + + "the catalogue could be corrupt!", fileName); + } + } + + /** + * Method that returns a List whose first element is a Calendar with the starting date and time of + * the lifetime of the supplied PFN, and whose second element is the TLifeTime the system is + * keeping the PFN. If no entry is found for the given PFN, an empty List is returned. Likewise if + * any DB error occurs. In any case, proper error messages get logged. Moreover notice that if for + * any reason the value for the Lifetime read from the DB does not allow creation of a valid + * TLifeTimeInSeconds, an Empty one is returned. Error messages in logs warn of the situation. + */ + public synchronized List volatileInfoOn(PFN pfn) { + + List aux = Lists.newArrayList(); + if (pfn == null) { + log + .error("VolatileAndJiT CATALOG: programming bug! 
volatileInfoOn " + "invoked on null PFN!"); + return aux; + } + Collection c = dao.volatileInfoOn(pfn.getValue()); + if (c.size() != 2) { + return aux; + } + Iterator i = c.iterator(); + // start time + long startInMillis = i.next().longValue() * 1000; + Calendar auxcal = Calendar.getInstance(); + auxcal.setTimeInMillis(startInMillis); + aux.add(auxcal); + // lifeTime + long lifetimeInSeconds = ((Long) i.next()).longValue(); + TLifeTimeInSeconds auxLifeTime = TLifeTimeInSeconds.makeEmpty(); + try { + auxLifeTime = TLifeTimeInSeconds.make(lifetimeInSeconds, TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + log.error( + "VolatileAndJiT CATALOG: programming bug! Retrieved long does " + + "not allow TLifeTimeCreation! long is: {}; error is: {}", + lifetimeInSeconds, e.getMessage(), e); + } + aux.add(auxLifeTime); + return aux; + } + + /** + * Private method used to return a String representation of the expired entries Collection of + * JiTData. + */ + private String jitString(Collection c) { + + if (c == null) { + return ""; + } + StringBuilder sb = new StringBuilder(); + sb.append("file,acl,uid,gid\n"); + JiTData aux = null; + for (Iterator i = c.iterator(); i.hasNext();) { + aux = i.next(); + sb.append(aux.pfn()); + sb.append(","); + sb.append(aux.acl()); + sb.append(","); + sb.append(aux.uid()); + sb.append(","); + sb.append(aux.gid()); + if (i.hasNext()) { + sb.append("\n"); + } + } + return sb.toString(); + } + + /** + * Private method that makes sure that the lifeTime of the request: (1) It is not less than a + * predetermined value: this check is needed because clients may omit to supply a value and some + * default one must be used; moreover, it is feared that if the requested lifetime is very low, + * such as 0 or a few seconds, there could be strange problems in having a file written and erased + * immediately. 
(2) It is not larger than a given ceiling; this is necessary because in the JiT + * model, the underlying system may decide to remove the pool account mappings; it is paramount + * that no ACLs remain set up for the now un-associated pool account. + */ + private long validatePinLifetime(long lifetime) { + + long duration = lifetime < floor ? floor : lifetime; // adjust for lifetime + // set to zero! + duration = duration <= ceiling ? duration : ceiling; // make sure lifetime + // is not longer than + // the maximum set! + return duration; + } + + /** + * Private method used to return a String representation of the expired entries Collection of pfn + * Strings. + */ + private String volatileString(List c) { + + if (c == null) { + return ""; + } + StringBuilder sb = new StringBuilder(); + for (Iterator i = c.iterator(); i.hasNext();) { + sb.append(i.next()); + if (i.hasNext()) { + sb.append(","); + } + } + return sb.toString(); + } } diff --git a/src/main/java/it/grid/storm/catalogs/VolatileAndJiTDAO.java b/src/main/java/it/grid/storm/catalogs/VolatileAndJiTDAO.java deleted file mode 100644 index 806851304..000000000 --- a/src/main/java/it/grid/storm/catalogs/VolatileAndJiTDAO.java +++ /dev/null @@ -1,889 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import com.google.common.collect.Lists; - -import it.grid.storm.config.Configuration; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DAO class for VolatileAndJiTCatalog: it has been specifically designed for - * MySQL. - * - * @author EGRID ICTP - * @version 1.0 (based on old PinnedFilesDAO) - * @date November, 2006 - */ -public class VolatileAndJiTDAO { - - private static final Logger log = LoggerFactory.getLogger(VolatileAndJiTDAO.class); - - // The name of the class for the DB driver - private final String driver = Configuration.getInstance().getDBDriver(); - - // The URL of the DB - private final String url = Configuration.getInstance().getStormDbURL(); - - // The password for the DB - private final String password = Configuration.getInstance().getDBPassword(); - - // The name for the DB - private final String name = Configuration.getInstance().getDBUserName(); - - // Connection to DB - private Connection con = null; - - // instance of DAO - private static final VolatileAndJiTDAO dao = new VolatileAndJiTDAO(); - - // timer thread that will run a task to alert when reconnecting is necessary! - private Timer clock = null; - - // timer task that will update the boolean signaling that a reconnection is needed! 
- private TimerTask clockTask = null; - - // milliseconds that must pass before reconnecting to DB - private final long period = Configuration.getInstance().getDBReconnectPeriod() * 1000; - - // initial delay in milliseconds before starting timer - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - - // boolean that tells whether reconnection is needed because of MySQL bug! - private boolean reconnect = false; - - private VolatileAndJiTDAO() { - - setUpConnection(); - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of VolatileAndJiTDAO. - */ - public static VolatileAndJiTDAO getInstance() { - - return dao; - } - - /** - * Method that inserts a new entry in the JiT table of the DB, consisting of - * the specified filename, the local user uid, the local user gid, the acl, - * the start time as expressed by UNIX epoch (seconds since 00:00:00 1 1 1970) - * and the number of seconds the jit must last. - * - * In the DB, the start time gets translated into DATE:TIME in order to make - * it more readable. pinLifetime remains in seconds. - */ - public void addJiT(String filename, int uid, int gid, int acl, long start, - long pinLifetime) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. 
addJiT: unable to get a valid connection!"); - return; - } - String sql = "INSERT INTO jit(file,uid,gid,acl,start,pinLifetime) VALUES(?,?,?,?,FROM_UNIXTIME(?),?)"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - stmt.setInt(2, uid); - logWarnings(stmt.getWarnings()); - stmt.setInt(3, gid); - logWarnings(stmt.getWarnings()); - stmt.setInt(4, acl); - logWarnings(stmt.getWarnings()); - stmt.setLong(5, start); - logWarnings(stmt.getWarnings()); - stmt.setLong(6, pinLifetime); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. addJiT: {}", stmt); - stmt.execute(); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in addJiT: {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that inserts a new entry in the Volatile table of the DB, consisting - * of the specified filename, the start time as expressed by UNIX epoch - * (seconds since 00:00:00 1 1 1970), and the number of seconds the file must - * be kept for. - * - * In the DB, the start time gets translated into DATE:TIME in order to make - * it more readable. pinLifetime remains in seconds. - */ - public void addVolatile(String filename, long start, long fileLifetime) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. addVolatile: unable to get a valid connection!"); - return; - } - String sql = "INSERT INTO volatile(file,start,fileLifetime) VALUES(?,FROM_UNIXTIME(?),?)"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - stmt.setLong(2, start); - logWarnings(stmt.getWarnings()); - stmt.setLong(3, fileLifetime); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. addVolatile: {}", stmt); - stmt.execute(); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! 
Error in addVolatile: {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Checks whether the given file exists in the volatile table or not. - * - * @param filename - * @return true if there is antry for the given file in the - * volatilte table, false otherwise. - */ - public boolean exists(String filename) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. exists: unable to get a valid connection!"); - return false; - } - String sql = "SELECT ID FROM volatile WHERE file=? LIMIT 1"; - PreparedStatement stmt = null; - ResultSet rs = null; - boolean result; - - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - - log.debug("VolatileAndJiTDAO - existsOnVolatile - {}", stmt); - - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - - if (rs.next()) { - result = true; - } else { - result = false; - } - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in existsOnVolatile: {}", - e.getMessage(), e); - result = false; - } finally { - close(rs); - close(stmt); - } - return result; - } - - /** - * Method that updates an existing entry in the JiT table of the DB, - * consisting of the specified filename, the uid and gid of the local user, - * the acl, the start time as expressed by UNIX epoch (seconds since 00:00:00 - * 1 1 1970), and the number of seconds the jit must last. - * - * In the DB, the start time gets translated into DATE:TIME in order to make - * it more readable. pinLifetime remains in seconds. - * - * This method _forces_ the update regardless of the fact that the new expiry - * lasts less than the current one! This method is intended to be used by - * expireJiT. - * - * Only start and pinLifetime get updated, while filename, uid, gid and acl, - * are used as criteria to select records. 
- */ - public void forceUpdateJiT(String filename, int uid, int acl, long start, - long pinLifetime) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. forceUpdateJiT: unable to get a valid connection!"); - return; - } - String sql = "UPDATE jit " + "SET start=FROM_UNIXTIME(?), pinLifetime=? " - + "WHERE file=? AND uid=? AND acl=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setLong(1, start); - logWarnings(stmt.getWarnings()); - stmt.setLong(2, pinLifetime); - logWarnings(stmt.getWarnings()); - stmt.setString(3, filename); - logWarnings(stmt.getWarnings()); - stmt.setInt(4, uid); - logWarnings(stmt.getWarnings()); - stmt.setInt(5, acl); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. forceUpdateJiT: {}", stmt); - int n = stmt.executeUpdate(); - log.debug("VolatileAndJiTDAO. {} jit entries forced updated.", n); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in forceUpdateJiT: {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that returns the number of entries in the catalogue, matching the - * given filename, uid and acl. - * - * Notice that in general there should be either one or none, and more should - * be taken as indication of catalogue corruption. - * - * -1 is returned if there are problems with the DB. - */ - public int numberJiT(String filename, int uid, int acl) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. numberJiT: unable to get a valid connection!"); - return -1; - } - String sql = "SELECT COUNT(ID) FROM jit WHERE file=? AND uid=? 
AND acl=?"; - PreparedStatement stmt = null; - ResultSet rs = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - stmt.setInt(2, uid); - logWarnings(stmt.getWarnings()); - stmt.setInt(3, acl); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. numberJiT: {}", stmt); - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - int n = -1; - if (rs.next()) { - n = rs.getInt(1); - } else { - log.error("VolatileAndJiTDAO! Unexpected situation in numberJiT: " - + "result set empty!"); - } - close(rs); - close(stmt); - return n; - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in numberJiT: {}", e.getMessage(), e); - close(rs); - close(stmt); - return -1; - } - } - - /** - * Method that returns the number of Volatile entries in the catalogue, for - * the given filename. - * - * Notice that in general there should be either one or none, and more should - * be taken as indication of catalogue corruption. - * - * -1 is returned if there are problems with the DB. - */ - public int numberVolatile(String filename) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. numberVolatile: unable to get a valid connection!"); - return -1; - } - String sql = "SELECT COUNT(ID) FROM volatile WHERE file=?"; - PreparedStatement stmt = null; - ResultSet rs = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. numberVolatile: {}", stmt); - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - int n = -1; - if (rs.next()) { - n = rs.getInt(1); - } else { - log.error("VolatileAndJiTDAO! Unexpected situation in numberVolatile: " - + "result set empty!"); - } - close(rs); - close(stmt); - return n; - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! 
Error in numberVolatile: {}", - e.getMessage(), e); - close(rs); - close(stmt); - return -1; - } - } - - /** - * Method that removes all entries in the JiT table of the DB, that match the - * specified filename. So this action takes place _regardless_ of the user - * that set up the ACL! - */ - public void removeAllJiTsOn(String filename) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. removeAllJiTsOn: unable to get a " - + "valid connection!"); - return; - } - String sql = "DELETE FROM jit WHERE file=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. removeJiT: {}", stmt); - int n = stmt.executeUpdate(); - log.debug("VolatileAndJiTDAO. removeJiT: {} entries removed", n); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in removeJiT: {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method used to remove all expired entries, both of pinned files and of jit - * ACLs. Also, when removing volatile entries, any jit entry that refers to - * those expired volatiles will also be removed. - * - * The method requires a long representing the time measured as UNIX EPOCH - * upon which to base the purging: entries are evaluated expired when compared - * to this date. - * - * The method returns an array of two Collections; Collection[0] contains - * expired volatile entries String PFNs, while Collection[1] contains - * JiTDataTO objects. Collection[1] also contains those entries that may not - * have expired yet, but since the respective Volatile is being removed they - * too must be removed automatically. - * - * WARNING! If any error occurs it gets logged, and an array of two empty - * Collection is returned. This operation is treated as a Transcation by the - * DB, so a Roll Back should return everything to its original state! 
- */ - public Collection[] removeExpired(long time) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. removeExpired: unable to get a valid connection!"); - // in case of any failure return an array of two empty Collection - return new Collection[] { new ArrayList(), new ArrayList() }; - } - - String vol = "SELECT ID,file FROM volatile WHERE (UNIX_TIMESTAMP(start)+fileLifetime 0) { - // there are expired volatile entries: adjust jit selection to include - // those SURLs too! - jit = jit + " OR file IN " + makeFileString(volat); - } - stmt = con.prepareStatement(jit); - logWarnings(con.getWarnings()); - stmt.setLong(1, time); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. removeExpired: {}", stmt); - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - Collection track = new ArrayList(); - Collection trackid = new ArrayList(); - JiTData aux = null; - while (rs.next()) { - trackid.add(new Long(rs.getLong("ID"))); - aux = new JiTData(rs.getString("file"), rs.getInt("acl"), - rs.getInt("uid"), rs.getInt("gid")); - track.add(aux); - } - int njit = trackid.size(); - close(rs); - close(stmt); - - // remove entries - Collection volcol = new ArrayList(); - Collection jitcol = new ArrayList(); - try { - con.setAutoCommit(false); // begin transaction! - logWarnings(con.getWarnings()); - // delete volatile - int deletedvol = 0; - if (nvolat > 0) { - delvol = delvol + makeIDString(volatid); - stmt = con.prepareStatement(delvol); - logWarnings(con.getWarnings()); - log.debug("VolatileAndJiTDAO. removeExpired: {}", stmt); - deletedvol = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - close(stmt); - } - // delete jits - int deletedjit = 0; - if (njit > 0) { - deljit = deljit + makeIDString(trackid); - stmt = con.prepareStatement(deljit); - logWarnings(con.getWarnings()); - log.debug("VolatileAndJiTDAO. 
removeExpired: {}", stmt); - deletedjit = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - close(stmt); - } - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); // end transaction! - logWarnings(con.getWarnings()); - log.debug("VolatileAndJiTDAO. Removed {} volatile catalogue entries " - + "and {} jit catalogue entries.", deletedvol, deletedjit); - volcol = volat; - jitcol = track; - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Unable to complete removeExpired... " - + "rolling back! {}", e.getMessage(), e); - rollback(con); - close(stmt); - } - - // return collections - return new Collection[] { volcol, jitcol }; - } catch (SQLException e) { - close(rs); - close(stmt); - log.error("VolatileAndJiTDAO! Unable to complete removeExpired! {}", - e.getMessage(), e); - // in case of any failure return an array of two empty Collection - return new Collection[] { new ArrayList(), new ArrayList() }; - } - } - - /** - * Method that removes all entries in the Volatile table of the DB, that match - * the specified filename. - */ - public void removeVolatile(String filename) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. removeVolatile: unable to get a valid " - + "connection!"); - return; - } - String sql = "DELETE FROM volatile WHERE file=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. removeVolatile: {}", stmt); - int n = stmt.executeUpdate(); - log.debug("VolatileAndJiTDAO. removeVolatile: {} entries removed.", n); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! 
Error in removeVolatile: {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that updates an existing entry in the JiT table of the DB, - * consisting of the specified filename, the uid and gid of the local user, - * the acl, the start time as expressed by UNIX epoch (seconds since 00:00:00 - * 1 1 1970), and the number of seconds the jit must last. - * - * In the DB, the start time gets translated into DATE:TIME in order to make - * it more readable. pinLifetime remains in seconds. - * - * Entries get updated only if the new expiry calculated by adding start and - * pinLifetime, is larger than the existing one. - * - * Only start and pinLifetime get updated, while filename, uid, gid and acl, - * are used as criteria to select records. - */ - public void updateJiT(String filename, int uid, int acl, long start, - long pinLifetime) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. updateJiT: unable to get a valid " - + "connection!"); - return; - } - String sql = "UPDATE jit " - + "SET start=FROM_UNIXTIME(?), pinLifetime=? " - + "WHERE file=? AND uid=? AND acl=? AND (UNIX_TIMESTAMP(start)+pinLifetime volatileInfoOn(String filename) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. volatileInfoOn: unable to get a valid connection!"); - return Lists.newArrayList(); - } - String sql = "SELECT UNIX_TIMESTAMP(start), fileLifetime FROM volatile WHERE file=?"; - PreparedStatement stmt = null; - ResultSet rs = null; - List aux = Lists.newArrayList(); - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO - infoOnVolatile - {}", stmt); - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - if (rs.next()) { - aux.add(rs.getLong("UNIX_TIMESTAMP(start)")); - aux.add(rs.getLong("fileLifetime")); - } else { - log.debug("VolatileAndJiTDAO! 
infoOnVolatile did not find {}", filename); - } - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in infoOnVolatile: {}", - e.getMessage(), e); - } finally { - close(rs); - close(stmt); - } - return aux; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("VolatileAndJiTDAO: reconnecting to DB. "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that closes a ResultSet and handles all possible - * exceptions. - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("VolatileAndJiTDAO! Unable to close ResultSet - Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary method that closes a Statement and handles all possible - * exceptions. - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("VolatileAndJiTDAO! Unable to close Statement {} - Error: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to log warnings. - */ - private void logWarnings(SQLWarning warning) { - - if (warning != null) { - log.debug("VolatileAndJiTDAO: {}", warning); - while ((warning = warning.getNextWarning()) != null) { - log.debug("VolatileAndJiTDAO: {}", warning); - } - } - } - - /** - * Method that returns a String containing all Files. 
- */ - private String makeFileString(Collection files) { - - StringBuilder sb = new StringBuilder("("); - for (Iterator i = files.iterator(); i.hasNext();) { - sb.append("'"); - sb.append((String) i.next()); - sb.append("'"); - if (i.hasNext()) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all IDs. - */ - private String makeIDString(Collection rowids) { - - StringBuilder sb = new StringBuilder("("); - for (Iterator i = rowids.iterator(); i.hasNext();) { - sb.append(i.next()); - if (i.hasNext()) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Auxiliary method used to roll back a transaction and handles all possible - * exceptions. - */ - private void rollback(Connection con) { - - if (con != null) { - try { - con.rollback(); - logWarnings(con.getWarnings()); - log.error("VolatileAndJiTDAO! Roll back successful!"); - } catch (SQLException e3) { - log.error("VolatileAndJiTDAO! Roll back failed! {}", e3.getMessage(), e3); - } - } - } - - /** - * Auxiliary method that sets up the connection to the DB. - */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - response = con.isValid(0); - logWarnings(con.getWarnings()); - } catch (Exception e) { - log.error("VolatileAndJiTDAO! Exception in setUpconnection! {}", - e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that takes down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (Exception e) { - log.error("VolatileAndJiTDAO! Exception in takeDownConnection! 
{}", - e.getMessage(), e); - } - } - } -} diff --git a/src/main/java/it/grid/storm/catalogs/executors/RequestFinalizerService.java b/src/main/java/it/grid/storm/catalogs/executors/RequestFinalizerService.java new file mode 100644 index 000000000..9e3f666f6 --- /dev/null +++ b/src/main/java/it/grid/storm/catalogs/executors/RequestFinalizerService.java @@ -0,0 +1,46 @@ +package it.grid.storm.catalogs.executors; + +import static java.util.concurrent.TimeUnit.SECONDS; + +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; + +import it.grid.storm.catalogs.executors.threads.BoLFinalizer; +import it.grid.storm.catalogs.executors.threads.PtGFinalizer; +import it.grid.storm.catalogs.executors.threads.PtPFinalizer; +import it.grid.storm.config.Configuration; + +public class RequestFinalizerService { + + private final long delay; + private final long period; + + private ScheduledExecutorService executor; + private PtPFinalizer ptpTask; + private BoLFinalizer bolTask; + private PtGFinalizer ptgTask; + + public RequestFinalizerService(Configuration config) { + + delay = config.getInProgressAgentInitialDelay() * 1000L; + period = config.getInProgressAgentInterval() * 1000L; + executor = Executors.newScheduledThreadPool(3); + ptpTask = new PtPFinalizer(config.getInProgressPtpExpirationTime()); + bolTask = new BoLFinalizer(); + ptgTask = new PtGFinalizer(); + + } + + public void start() { + + executor.scheduleAtFixedRate(ptpTask, delay, period, SECONDS); + executor.scheduleAtFixedRate(bolTask, delay, period, SECONDS); + executor.scheduleAtFixedRate(ptgTask, delay, period, SECONDS); + + } + + public void stop() { + + executor.shutdown(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/executors/threads/BoLFinalizer.java b/src/main/java/it/grid/storm/catalogs/executors/threads/BoLFinalizer.java new file mode 100644 index 000000000..3dd64a36f --- /dev/null +++ b/src/main/java/it/grid/storm/catalogs/executors/threads/BoLFinalizer.java 
@@ -0,0 +1,39 @@ +package it.grid.storm.catalogs.executors.threads; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.persistence.dao.BoLChunkDAO; +import it.grid.storm.persistence.impl.mysql.BoLChunkDAOMySql; + + +public class BoLFinalizer implements Runnable { + + private static final Logger log = LoggerFactory.getLogger(BoLFinalizer.class); + + private final BoLChunkDAO dao; + + public BoLFinalizer() { + + dao = BoLChunkDAOMySql.getInstance(); + } + + @Override + public void run() { + + log.debug("BoL finalizer started .."); + + try { + + int n = dao.releaseExpiredAndSuccessfulRequests(); + if (n > 0) { + log.info("Released {} expired and successful BoL requests", n); + } + + } catch (Exception e) { + + log.error("{}: {}", e.getClass(), e.getMessage(), e); + + } + } +} diff --git a/src/main/java/it/grid/storm/catalogs/executors/threads/PtGFinalizer.java b/src/main/java/it/grid/storm/catalogs/executors/threads/PtGFinalizer.java new file mode 100644 index 000000000..8d3026a09 --- /dev/null +++ b/src/main/java/it/grid/storm/catalogs/executors/threads/PtGFinalizer.java @@ -0,0 +1,46 @@ +package it.grid.storm.catalogs.executors.threads; + +import java.util.Collection; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.persistence.dao.PtGChunkDAO; +import it.grid.storm.persistence.impl.mysql.PtGChunkDAOMySql; +import it.grid.storm.srm.types.TSURL; + + +public class PtGFinalizer implements Runnable { + + private static final Logger log = LoggerFactory.getLogger(PtGFinalizer.class); + + private final PtGChunkDAO dao; + + public PtGFinalizer() { + + dao = PtGChunkDAOMySql.getInstance(); + } + + @Override + public void run() { + + log.debug("PtG finalizer started .."); + + try { + + Collection surls = dao.transitExpiredSRM_FILE_PINNED(); + + if (surls.size() > 0) { + log.info("Moved {} expired and successful PtG requests to SRM_FILE_PINNED", surls.size()); + log.debug("Released surls:"); + 
surls.forEach(surl -> { + log.debug("{}", surl); + }); + } + + } catch (Exception e) { + + log.error("{}: {}", e.getClass(), e.getMessage(), e); + } + } +} diff --git a/src/main/java/it/grid/storm/catalogs/executors/threads/PtPFinalizer.java b/src/main/java/it/grid/storm/catalogs/executors/threads/PtPFinalizer.java new file mode 100644 index 000000000..57ba6c64d --- /dev/null +++ b/src/main/java/it/grid/storm/catalogs/executors/threads/PtPFinalizer.java @@ -0,0 +1,86 @@ +package it.grid.storm.catalogs.executors.threads; + +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; + +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.persistence.dao.PtPChunkDAO; +import it.grid.storm.persistence.impl.mysql.PtPChunkDAOMySql; +import it.grid.storm.srm.types.InvalidTSURLAttributesException; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.synchcall.command.datatransfer.PutDoneCommand; +import it.grid.storm.synchcall.command.datatransfer.PutDoneCommandException; + + +public class PtPFinalizer implements Runnable { + + private static final Logger log = LoggerFactory.getLogger(PtPFinalizer.class); + + private static final String NAME = "Expired-PutRequests-Agent"; + + private long inProgressRequestsExpirationTime; + private final PtPChunkDAO dao; + + public PtPFinalizer(long inProgressRequestsExpirationTime) { + + this.inProgressRequestsExpirationTime = inProgressRequestsExpirationTime; + dao = PtPChunkDAOMySql.getInstance(); + log.info("{} created.", NAME); + } + + @Override + public void run() { + + log.debug("{} run.", NAME); + try { + + transitExpiredLifetimeRequests(); + transitExpiredInProgressRequests(); + + } catch (Exception e) { + + log.error("{}: {}", e.getClass(), e.getMessage(), e); + + } + } + + private void transitExpiredLifetimeRequests() { + + Map expiredRequests = dao.getExpiredSRM_SPACE_AVAILABLE(); + log.debug("{} lifetime-expired requests found ... 
", NAME, expiredRequests.size()); + + if (expiredRequests.isEmpty()) { + return; + } + + expiredRequests.entrySet().forEach(e -> executePutDone(e.getKey(), e.getValue())); + + int count = + dao.transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED(expiredRequests.keySet()); + log.info("{} updated expired put requests - {} db rows affected", NAME, count); + } + + private void executePutDone(Long id, String surl) { + + try { + + if (PutDoneCommand.executePutDone(TSURL.makeFromStringValidate(surl))) { + log.info("{} successfully executed a srmPutDone on surl {}", NAME, surl); + } + + } catch (InvalidTSURLAttributesException | PutDoneCommandException e) { + + log.error("{}. Unable to execute PutDone on request with id {} and surl {}: ", NAME, id, surl, + e.getMessage(), e); + } + } + + private void transitExpiredInProgressRequests() { + + int count = dao.transitLongTimeInProgressRequestsToStatus(inProgressRequestsExpirationTime, SRM_FAILURE, "Request timeout"); + log.debug("{} moved in-progress put requests to failure - {} db rows affected", NAME, count); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/surl/SURLStatusManagerImpl.java b/src/main/java/it/grid/storm/catalogs/surl/SURLStatusManagerImpl.java index 76bcefb72..e21aec50e 100644 --- a/src/main/java/it/grid/storm/catalogs/surl/SURLStatusManagerImpl.java +++ b/src/main/java/it/grid/storm/catalogs/surl/SURLStatusManagerImpl.java @@ -2,12 +2,13 @@ import it.grid.storm.authz.AuthzException; import it.grid.storm.catalogs.BoLChunkCatalog; -import it.grid.storm.catalogs.CopyChunkCatalog; import it.grid.storm.catalogs.PtGChunkCatalog; import it.grid.storm.catalogs.PtPChunkCatalog; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.dao.SURLStatusDAO; +import it.grid.storm.persistence.impl.mysql.SURLStatusDAOMySql; +import 
it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; @@ -24,7 +25,7 @@ public class SURLStatusManagerImpl implements SURLStatusManager { public boolean abortAllGetRequestsForSURL(GridUserInterface user, TSURL surl, String explanation) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.abortActivePtGsForSURL(user, surl, explanation); } @@ -33,7 +34,7 @@ public boolean abortAllGetRequestsForSURL(GridUserInterface user, TSURL surl, public boolean abortAllPutRequestsForSURL(GridUserInterface user, TSURL surl, String explanation) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.abortActivePtPsForSURL(user, surl, explanation); } @@ -61,11 +62,6 @@ public boolean abortRequest(GridUserInterface user, TRequestToken token, TStatusCode.SRM_REQUEST_QUEUED, TStatusCode.SRM_ABORTED, explanation); break; - case COPY: - CopyChunkCatalog.getInstance().updateFromPreviousStatus(token, - TStatusCode.SRM_REQUEST_QUEUED, TStatusCode.SRM_ABORTED, explanation); - break; - case EMPTY: break; @@ -137,7 +133,7 @@ public boolean failRequestForSURL(GridUserInterface user, public Map getPinnedSURLsForUser( GridUserInterface user, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.getPinnedSURLsForUser(user, surls); } @@ -145,7 +141,7 @@ public Map getPinnedSURLsForUser( public Map getPinnedSURLsForUser( GridUserInterface user, TRequestToken token, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.getPinnedSURLsForUser(user, token, surls); } @@ -154,7 +150,7 @@ public Map getPinnedSURLsForUser( public Map getSURLStatuses(GridUserInterface user, 
TRequestToken token) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.getSURLStatuses(token); } @@ -163,28 +159,28 @@ public Map getSURLStatuses(GridUserInterface user, TRequestToken token, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.getSURLStatuses(token, surls); } @Override public boolean isSURLBusy(TRequestToken requestTokenToExclude, TSURL surl) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.surlHasOngoingPtPs(surl, requestTokenToExclude); } @Override public boolean isSURLBusy(TSURL surl) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.surlHasOngoingPtPs(surl, null); } @Override public boolean isSURLPinned(TSURL surl) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.surlHasOngoingPtGs(surl); } @@ -212,7 +208,7 @@ private RequestSummaryData lookupRequest(TRequestToken token) { @Override public int markSURLsReadyForRead(TRequestToken token, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.markSURLsReadyForRead(token, surls); } @@ -220,7 +216,7 @@ public int markSURLsReadyForRead(TRequestToken token, List surls) { @Override public void releaseSURLs(GridUserInterface user, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); dao.releaseSURLs(user, surls); } @@ -228,7 +224,7 @@ public void releaseSURLs(GridUserInterface user, List surls) { @Override public void releaseSURLs(TRequestToken token, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); 
dao.releaseSURLs(token, surls); } diff --git a/src/main/java/it/grid/storm/catalogs/timertasks/ExpiredPutRequestsAgent.java b/src/main/java/it/grid/storm/catalogs/timertasks/ExpiredPutRequestsAgent.java deleted file mode 100644 index c205b6a46..000000000 --- a/src/main/java/it/grid/storm/catalogs/timertasks/ExpiredPutRequestsAgent.java +++ /dev/null @@ -1,93 +0,0 @@ -package it.grid.storm.catalogs.timertasks; - -import it.grid.storm.catalogs.PtPChunkDAO; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.synchcall.command.datatransfer.PutDoneCommand; -import it.grid.storm.synchcall.command.datatransfer.PutDoneCommandException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.Map; -import java.util.TimerTask; - - -public class ExpiredPutRequestsAgent extends TimerTask { - - private static final Logger log = LoggerFactory.getLogger(ExpiredPutRequestsAgent.class); - - private static final String NAME = "Expired-PutRequests-Agent"; - - private long inProgressRequestsExpirationTime; - - public ExpiredPutRequestsAgent(long inProgressRequestsExpirationTime) { - - this.inProgressRequestsExpirationTime = inProgressRequestsExpirationTime; - log.info("{} created.", NAME); - } - - @Override - public synchronized void run() { - - log.debug("{} run.", NAME); - try { - - transitExpiredLifetimeRequests(); - transitExpiredInProgressRequests(); - - } catch (Exception e) { - - log.error("{}: {}", e.getClass(), e.getMessage(), e); - - } - } - - private void transitExpiredLifetimeRequests() { - - PtPChunkDAO dao = PtPChunkDAO.getInstance(); - Map expiredRequests = dao.getExpiredSRM_SPACE_AVAILABLE(); - log.debug("{} lifetime-expired requests found ... 
", NAME, expiredRequests.size()); - - if (expiredRequests.isEmpty()) { - return; - } - - expiredRequests.entrySet().forEach(e -> executePutDone(e.getKey(), e.getValue())); - - int count = dao.transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED( - expiredRequests.keySet()); - log.info("{} updated expired put requests - {} db rows affected", NAME, count); - } - - private void executePutDone(Long id, String surl) { - - try { - - if (PutDoneCommand.executePutDone(TSURL.makeFromStringValidate(surl))) { - log.info("{} successfully executed a srmPutDone on surl {}", NAME, surl); - } - - } catch (InvalidTSURLAttributesException | PutDoneCommandException e) { - - log.error("{}. Unable to execute PutDone on request with id {} and surl {}: ", NAME, id, - surl, e.getMessage(), e); - } - } - - private void transitExpiredInProgressRequests() { - - PtPChunkDAO dao = PtPChunkDAO.getInstance(); - List expiredRequestsIds = - dao.getExpiredSRM_REQUEST_INPROGRESS(inProgressRequestsExpirationTime); - log.debug("{} expired in-progress requests found.", expiredRequestsIds.size()); - - if (expiredRequestsIds.isEmpty()) { - return; - } - - int count = dao.transitExpiredSRM_REQUEST_INPROGRESStoSRM_FAILURE(expiredRequestsIds); - log.info("{} moved in-progress put requests to failure - {} db rows affected", NAME, count); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/timertasks/RequestsGarbageCollector.java b/src/main/java/it/grid/storm/catalogs/timertasks/RequestsGarbageCollector.java index 090fc0920..b5bc938b7 100644 --- a/src/main/java/it/grid/storm/catalogs/timertasks/RequestsGarbageCollector.java +++ b/src/main/java/it/grid/storm/catalogs/timertasks/RequestsGarbageCollector.java @@ -6,168 +6,161 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import it.grid.storm.catalogs.BoLChunkCatalog; -import it.grid.storm.catalogs.PtGChunkCatalog; -import it.grid.storm.catalogs.RequestSummaryDAO; import it.grid.storm.config.Configuration; +import 
it.grid.storm.persistence.dao.RequestSummaryDAO; +import it.grid.storm.persistence.impl.mysql.RequestSummaryDAOMySql; import it.grid.storm.tape.recalltable.TapeRecallCatalog; public class RequestsGarbageCollector extends TimerTask { - private static final Logger log = LoggerFactory.getLogger(RequestsGarbageCollector.class); + private static final Logger log = LoggerFactory.getLogger(RequestsGarbageCollector.class); - private final Configuration config = Configuration.getInstance(); - private final RequestSummaryDAO dao = RequestSummaryDAO.getInstance(); - private final PtGChunkCatalog ptgCat = PtGChunkCatalog.getInstance(); - private final BoLChunkCatalog bolCat = BoLChunkCatalog.getInstance(); + private final Configuration config = Configuration.getInstance(); + private final RequestSummaryDAO dao = RequestSummaryDAOMySql.getInstance(); - private Timer handler; - private long delay; + private Timer handler; + private long delay; - public RequestsGarbageCollector(Timer handlerTimer, long delay) { + public RequestsGarbageCollector(Timer handlerTimer, long delay) { - this.delay = delay; - handler = handlerTimer; - } + this.delay = delay; + handler = handlerTimer; + } - @Override - public void run() { + @Override + public void run() { - try { + try { - TGarbageData gd = purgeExpiredRequests(); + TGarbageData gd = purgeExpiredRequests(); - if (gd.getTotalPurged() == 0) { + if (gd.getTotalPurged() == 0) { - log.trace("GARBAGE COLLECTOR didn't find completed requests older than {} seconds", - config.getExpiredRequestTime()); + log.trace("GARBAGE COLLECTOR didn't find completed requests older than {} seconds", + config.getCompletedRequestsAgentPurgeAge()); - } else { + } else { - log.info( - "GARBAGE COLLECTOR removed < {} > completed requests (< {} > recall) older than {} seconds", - gd.getTotalPurgedRequests(), gd.getTotalPurgedRecalls(), - config.getExpiredRequestTime()); + log.info( + "GARBAGE COLLECTOR removed < {} > completed requests (< {} > recall) older than {} 
seconds", + gd.getTotalPurgedRequests(), gd.getTotalPurgedRecalls(), + config.getCompletedRequestsAgentPurgeAge()); - } + } - long nextDelay = computeNextDelay(gd); + long nextDelay = computeNextDelay(gd); - if (nextDelay != delay) { + if (nextDelay != delay) { - log.info("GARBAGE COLLECTOR: tuning new interval to {} seconds", nextDelay / 1000); - delay = nextDelay; + log.info("GARBAGE COLLECTOR: tuning new interval to {} seconds", nextDelay / 1000); + delay = nextDelay; - } + } - } catch (Exception t) { + } catch (Exception t) { - /* useful to prevent unexpected exceptions that would kill the GC */ - log.error(t.getMessage(), t); + /* useful to prevent unexpected exceptions that would kill the GC */ + log.error(t.getMessage(), t); - } finally { + } finally { - reschedule(); - } - } + reschedule(); + } + } - /** - * Delete from database the completed requests older than a specified and configurable value. - * - * @return A TGarbageData object containing info about the deleted requests - */ - private TGarbageData purgeExpiredRequests() { + /** + * Delete from database the completed requests older than a specified and configurable value. 
+ * + * @return A TGarbageData object containing info about the deleted requests + */ + private TGarbageData purgeExpiredRequests() { - if (!enabled()) { - return TGarbageData.EMPTY; - } + if (!enabled()) { + return TGarbageData.EMPTY; + } - long expirationTime = config.getExpiredRequestTime(); - int purgeSize = config.getPurgeBatchSize(); + long expirationTime = config.getCompletedRequestsAgentPurgeAge(); + int purgeSize = config.getCompletedRequestsAgentPurgeSize(); - int nRequests = purgeExpiredRequests(expirationTime, purgeSize); - int nRecalls = purgeExpiredRecallRequests(expirationTime, purgeSize); + int nRequests = purgeExpiredRequests(expirationTime, purgeSize); + int nRecalls = purgeExpiredRecallRequests(expirationTime, purgeSize); - return new TGarbageData(nRequests, nRecalls); - } + return new TGarbageData(nRequests, nRecalls); + } - /** - * Check if Garbage Collector is enabled or not. - * - * @return If the purger is enabled. False otherwise. - */ - private boolean enabled() { + /** + * Check if Garbage Collector is enabled or not. + * + * @return If the purger is enabled. False otherwise. + */ + private boolean enabled() { - return config.getExpiredRequestPurging(); - } + return config.isCompletedRequestsAgentEnabled(); + } - /** - * Method used to purge from db a bunch of completed requests, older than the - * specified @expiredRequestTime. - * - * @param purgeSize The maximum size of the bunch of expired requests that must be deleted - * @param expiredRequestTime The number of seconds after that a request can be considered - * expired - * @return The number of requests involved. - */ - private synchronized int purgeExpiredRequests(long expiredRequestTime, int purgeSize) { + /** + * Method used to purge from db a bunch of completed requests, older than the + * specified @expiredRequestTime. 
+ * + * @param purgeSize The maximum size of the bunch of expired requests that must be deleted + * @param expiredRequestTime The number of seconds after that a request can be considered expired + * @return The number of requests involved. + */ + private synchronized int purgeExpiredRequests(long expiredRequestTime, int purgeSize) { - ptgCat.transitExpiredSRM_FILE_PINNED(); - bolCat.transitExpiredSRM_SUCCESS(); + return dao.purgeExpiredRequests(expiredRequestTime, purgeSize).size(); - return dao.purgeExpiredRequests(expiredRequestTime, purgeSize).size(); + } - } + /** + * Method used to clear a bunch of completed recall requests from database. + * + * @param expirationTime The number of seconds that must pass before considering a request as + * expired + * @param purgeSize The maximum size of the bunch of expired requests that must be deleted + * @return The number of requests involved. + */ + private synchronized int purgeExpiredRecallRequests(long expirationTime, int purgeSize) { - /** - * Method used to clear a bunch of completed recall requests from database. - * - * @param expirationTime The number of seconds that must pass before considering a request as - * expired - * @param purgeSize The maximum size of the bunch of expired requests that must be deleted - * @return The number of requests involved. - */ - private synchronized int purgeExpiredRecallRequests(long expirationTime, int purgeSize) { + return TapeRecallCatalog.getInstance().purgeCatalog(expirationTime, purgeSize); + } - return new TapeRecallCatalog().purgeCatalog(expirationTime, purgeSize); - } + /** + * Compute a new delay. It will be decreased if the number of purged requests is equal to the + * purge.size value. Otherwise, it will be increased until default value. + * + * @return the computed next interval predicted from last removed requests info + */ + private long computeNextDelay(TGarbageData gd) { - /** - * Compute a new delay. 
It will be decreased if the number of purged requests is equal to the - * purge.size value. Otherwise, it will be increased until default value. - * - * @return the computed next interval predicted from last removed requests info - */ - private long computeNextDelay(TGarbageData gd) { + /* max delay from configuration in milliseconds */ + long maxDelay = config.getCompletedRequestsAgentPeriod() * 1000L; + /* min delay accepted in milliseconds */ + long minDelay = 10000L; - /* max delay from configuration in milliseconds */ - long maxDelay = config.getRequestPurgerPeriod() * 1000L; - /* min delay accepted in milliseconds */ - long minDelay = 10000L; + long nextDelay; - long nextDelay; + /* Check purged requests value */ + if (gd.getTotalPurged() >= config.getCompletedRequestsAgentPurgeSize()) { - /* Check purged requests value */ - if (gd.getTotalPurged() >= config.getPurgeBatchSize()) { + /* bunch size reached: decrease interval */ + nextDelay = Math.max(delay / 2, minDelay); - /* bunch size reached: decrease interval */ - nextDelay = Math.max(delay / 2, minDelay); + } else { - } else { + /* bunch size not reached: increase interval */ + nextDelay = Math.min(delay * 2, maxDelay); - /* bunch size not reached: increase interval */ - nextDelay = Math.min(delay * 2, maxDelay); + } - } + return nextDelay; + } - return nextDelay; - } - - /** - * Schedule another task after @delay milliseconds. - */ - private void reschedule() { - - handler.schedule(new RequestsGarbageCollector(handler, delay), delay); - } + /** + * Schedule another task after @delay milliseconds. 
+ */ + private void reschedule() { + + handler.schedule(new RequestsGarbageCollector(handler, delay), delay); + } } diff --git a/src/main/java/it/grid/storm/check/SimpleCheckManager.java b/src/main/java/it/grid/storm/check/SimpleCheckManager.java index 41c3d0e23..1514232e8 100644 --- a/src/main/java/it/grid/storm/check/SimpleCheckManager.java +++ b/src/main/java/it/grid/storm/check/SimpleCheckManager.java @@ -24,8 +24,8 @@ import it.grid.storm.check.sanity.filesystem.NamespaceFSExtendedACLUsageCheck; import it.grid.storm.check.sanity.filesystem.NamespaceFSExtendedAttributeUsageCheck; import it.grid.storm.filesystem.MtabUtil; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.model.VirtualFS; /** * @author Michele Dibenedetto @@ -77,7 +77,7 @@ private Check getNamespaceFSAssociationCheck() { if (log.isDebugEnabled()) { log.debug("Retrieved MountPoints: {}", printMapCouples(mountPoints)); } - List vfsSet = NamespaceDirector.getNamespace().getAllDefinedVFS(); + List vfsSet = Namespace.getInstance().getAllDefinedVFS(); return new NamespaceFSAssociationCheck(mountPoints, vfsSet); } diff --git a/src/main/java/it/grid/storm/check/SimpleClassLoaderCheckManager.java b/src/main/java/it/grid/storm/check/SimpleClassLoaderCheckManager.java deleted file mode 100644 index 83f3f7e28..000000000 --- a/src/main/java/it/grid/storm/check/SimpleClassLoaderCheckManager.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ - -package it.grid.storm.check; - -import java.io.File; -import java.io.FileInputStream; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.net.URL; -import java.security.CodeSource; -import java.util.ArrayList; -import java.util.List; -import java.util.jar.JarEntry; -import java.util.jar.JarInputStream; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * @author Michele Dibenedetto THIS CLASS HAS TO BE TESTED - */ -public class SimpleClassLoaderCheckManager extends CheckManager { - - private static final Logger log = LoggerFactory - .getLogger(SimpleClassLoaderCheckManager.class); - - private ArrayList checks = new ArrayList(); - - @Override - protected Logger getLogger() { - - return log; - } - - @Override - protected void loadChecks() { - - CodeSource source = SimpleClassLoaderCheckManager.class - .getProtectionDomain().getCodeSource(); - URL location = null; - if (source != null) { - location = source.getLocation(); - log.info("location: {}", location); - } - String packageResourcePath = "it" + File.separatorChar + "grid" - + File.separatorChar + "storm" + File.separatorChar + "check" - + File.separatorChar + "sanity"; - List classes = getClasseNamesInPackage(location.toString(), - packageResourcePath); - for (String className : classes) { - Class classe = null; - try { - classe = Class.forName(className); - } catch (ClassNotFoundException e) { - log.error(e.getMessage()); - } - Constructor constructor; - try { - constructor = classe.getConstructor(); - try { - Check check = (Check) 
constructor.newInstance(); - checks.add(check); - } catch (IllegalArgumentException e) { - log.error(e.getMessage(), e); - } catch (InstantiationException e) { - log.error(e.getMessage(), e); - } catch (IllegalAccessException e) { - log.error(e.getMessage(), e); - } catch (InvocationTargetException e) { - log.error(e.getMessage(), e); - } - } catch (SecurityException e1) { - log.error(e1.getMessage(), e1); - } catch (NoSuchMethodException e1) { - log.error(e1.getMessage(), e1); - } - } - } - - private List getClasseNamesInPackage(String jarName, - String packageName) { - - ArrayList arrayList = new ArrayList(); - packageName = packageName.replaceAll("\\.", "" + File.separatorChar); - try { - JarInputStream jarFile = new JarInputStream(new FileInputStream(jarName)); - JarEntry jarEntry; - while (true) { - jarEntry = jarFile.getNextJarEntry(); - if (jarEntry == null) { - break; - } - if ((jarEntry.getName().startsWith(packageName)) - && (jarEntry.getName().endsWith(".class"))) { - arrayList.add(jarEntry.getName().replaceAll("" + File.separatorChar, - "\\.")); - } - } - jarFile.close(); - } catch (Exception e) { - e.printStackTrace(); - } - return arrayList; - } - - @Override - protected List prepareSchedule() { - - return checks; - } -} diff --git a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSAssociationCheck.java b/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSAssociationCheck.java index bb2c86f98..5fbb46aae 100644 --- a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSAssociationCheck.java +++ b/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSAssociationCheck.java @@ -15,13 +15,15 @@ import java.io.IOException; import java.util.Collection; import java.util.Map; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import it.grid.storm.check.GenericCheckException; + import it.grid.storm.check.Check; import it.grid.storm.check.CheckResponse; import it.grid.storm.check.CheckStatus; -import 
it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.check.GenericCheckException; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.namespace.naming.NamespaceUtil; /** @@ -42,7 +44,7 @@ public class NamespaceFSAssociationCheck implements Check { private Map mountPoints; - private Collection vfsSet; + private Collection vfsSet; private NamespaceFSAssociationCheck() { @@ -54,7 +56,7 @@ private NamespaceFSAssociationCheck() { * @throws IllegalArgumentException */ public NamespaceFSAssociationCheck(Map mountPoints, - Collection vfsSet) throws IllegalArgumentException { + Collection vfsSet) throws IllegalArgumentException { this(); if (mountPoints == null || vfsSet == null) { @@ -83,9 +85,9 @@ public NamespaceFSAssociationCheck(Map mountPoints, * @param vfsSet * @return */ - private boolean verifyVfsSet(Collection vfsSet) { + private boolean verifyVfsSet(Collection vfsSet) { - for (VirtualFSInterface vfs : vfsSet) { + for (VirtualFS vfs : vfsSet) { if (vfs == null) { log.info("The vfsSet contains null entries"); return false; @@ -136,7 +138,7 @@ public CheckResponse execute() throws GenericCheckException { CheckStatus status = CheckStatus.SUCCESS; String errorMessage = ""; - for (VirtualFSInterface vfs : vfsSet) { + for (VirtualFS vfs : vfsSet) { // check if is simple posix FS boolean currentResponse = verifyPosixDeclaredFS(vfs.getFSType()); if (!currentResponse) { diff --git a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedACLUsageCheck.java b/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedACLUsageCheck.java index de3d9ab45..335aa90ad 100644 --- a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedACLUsageCheck.java +++ b/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedACLUsageCheck.java @@ -10,6 +10,13 @@ */ package it.grid.storm.check.sanity.filesystem; +import java.io.File; +import java.io.IOException; +import java.util.Calendar; + +import 
org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.check.Check; import it.grid.storm.check.CheckResponse; import it.grid.storm.check.CheckStatus; @@ -19,14 +26,9 @@ import it.grid.storm.griduser.CannotMapUserException; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.LocalUser; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.VirtualFSInterface; -import java.io.File; -import java.io.IOException; -import java.util.Calendar; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import it.grid.storm.namespace.model.VirtualFS; /** * @author Michele Dibenedetto @@ -64,7 +66,7 @@ public CheckResponse execute() throws GenericCheckException { } try { // load declared file systems from namespace.xml - for (VirtualFSInterface vfs : NamespaceDirector.getNamespace().getAllDefinedVFS()) { + for (VirtualFS vfs : Namespace.getInstance().getAllDefinedVFS()) { String fsRootPath = vfs.getRootPath().trim(); if (fsRootPath.charAt(fsRootPath.length() - 1) != File.separatorChar) { fsRootPath += File.separatorChar; diff --git a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeDeclarationCheck.java b/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeDeclarationCheck.java deleted file mode 100644 index 46135dd20..000000000 --- a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeDeclarationCheck.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ - -package it.grid.storm.check.sanity.filesystem; - -import java.io.IOException; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import it.grid.storm.check.Check; -import it.grid.storm.check.CheckResponse; -import it.grid.storm.check.CheckStatus; -import it.grid.storm.check.GenericCheckException; -import it.grid.storm.filesystem.MtabRow; -import it.grid.storm.filesystem.MtabUtil; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.VirtualFSInterface; - -/** - * @author Michele Dibenedetto - */ -public class NamespaceFSExtendedAttributeDeclarationCheck implements Check { - - private static final Logger log = - LoggerFactory.getLogger(NamespaceFSExtendedAttributeDeclarationCheck.class); - - private static final String POSIX_EXTENDED_ATTRIBUTES_OPTION_NAME = "user_xattr"; - - private static final String CHECK_NAME = "NamespaceFSEAValidation"; - private static final String CHECK_DESCRIPTION = - "This check verifies that all the file systems declared " - + "in namespace.xml are mounted at boot time with the mount \'extended attribute abilitation\' option "; - - private static final boolean CRITICAL_CHECK = false; - - @Override - public CheckResponse execute() throws GenericCheckException { - - CheckStatus status = CheckStatus.SUCCESS; - String errorMessage = ""; - // load mtab rows - List rows; - try { - rows = MtabUtil.getRows(); - } catch (IOException e) { - log.warn("Unable to get the rows from mtab. 
IOException : {}", e.getMessage()); - return new CheckResponse(CheckStatus.INDETERMINATE, - "Check not performed. Unable to get the rows from mtab. IOException : " + e.getMessage()); - } - log.debug("Retrieved Mtab : {}", rows.toString()); - // load declared file systems from namespace.xml - for (VirtualFSInterface vfs : NamespaceDirector.getNamespace().getAllDefinedVFS()) { - String fsTypeName = vfs.getFSType(); - String fsRootPath = vfs.getRootPath(); - if (fsTypeName == null || fsRootPath == null) { - log.warn( - "Skipping chek on VFS with alias '{}' has null type ->{}<- " + "or root path ->{}<-", - vfs.getAliasName(), vfs.getFSType(), vfs.getRootPath()); - continue; - } - log.debug("Checking fs at {} with type {}", fsRootPath, fsTypeName); - boolean found = false; - // for each root path get the matching line in mstab - for (MtabRow row : rows) { - if (fsRootPath.startsWith(row.getMountPoint())) { - log.debug("Found on mountPoint {}", row.getMountPoint()); - // this is the row to check - found = true; - SupportedFSType fsType; - try { - fsType = SupportedFSType.parseFS(fsTypeName); - } catch (IllegalArgumentException e) { - log.warn("Unable to get the SupportedFSType for file system '{}'. 
" - + "IllegalArgumentException: {}", fsTypeName, e.getMessage()); - throw new GenericCheckException( - "Unable to get the " + "SupportedFSType for file system \'" + fsTypeName - + "\' IllegalArgumentException: " + e.getMessage()); - } - - // given the file system specified in the row check if the - // appropriate flag enabling EA is set - CheckStatus retrievedStatus; - switch (fsType) { - case EXT3: - retrievedStatus = checkEXT3(row.getMountOptions()); - break; - case GPFS: - retrievedStatus = checkGPFS(row.getMountOptions()); - break; - default: { - log.error("Unable to switch on the provided SupportedFSType " + "(unknown): {}", - fsType); - throw new GenericCheckException( - "Unable to switch on the " + "provided SupportedFSType (unknown) : " + fsType); - } - } - if (!retrievedStatus.equals(CheckStatus.SUCCESS)) { - log.error("Check failed for file system at {} with type {}", fsRootPath, fsType); - errorMessage += - "Check failed for file system at " + fsRootPath + " with type " + fsType + "; "; - } - status = CheckStatus.and(status, retrievedStatus); - break; - } - } - if (!found) { - log.error("No file systems are mounted at path {}!", fsRootPath); - errorMessage += "No file systems are mounted at path " + fsRootPath + ";"; - status = CheckStatus.INDETERMINATE; - } - } - return new CheckResponse(status, errorMessage); - } - - /** - * Checks if the ext3 mount option POSIX_EXTENDED_ATTRIBUTES_OPTION_NAME is in the provided mount - * options list - * - * @param fsOptions a comma separated list of mount options - * @return a successful CheckStatus if the option is available - */ - private CheckStatus checkEXT3(List fsOptions) { - - log.debug("Checking ext3 file system estended attribute options " + "against '{}'", - fsOptions.toString()); - CheckStatus response = CheckStatus.FAILURE; - if (fsOptions.contains(POSIX_EXTENDED_ATTRIBUTES_OPTION_NAME)) { - log.debug("Options for ext3 correctly set"); - response = CheckStatus.SUCCESS; - } - return response; - } - - /** 
- * Checks if the gpfs mount option is in the provided mount options list - * - * @param fsOptions a comma separated list of mount options - * @return always a successful CheckStatus, gpfs has always EA enabled - */ - private CheckStatus checkGPFS(List fsOptions) { - - log.debug("Checking gpfs file system estended attribute options " + "against '{}'", - fsOptions.toString()); - /* - * According to Vladimir for GPFS the EA are enabled by default and their status doesn't have - * any info in mtab - */ - CheckStatus response = CheckStatus.SUCCESS; - return response; - } - - @Override - public String getName() { - - return CHECK_NAME; - } - - @Override - public String getDescription() { - - return CHECK_DESCRIPTION; - } - - @Override - public boolean isCritical() { - - return CRITICAL_CHECK; - } -} diff --git a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeUsageCheck.java b/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeUsageCheck.java index a46239211..4f70e9654 100644 --- a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeUsageCheck.java +++ b/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeUsageCheck.java @@ -25,8 +25,8 @@ import it.grid.storm.ea.ExtendedAttributes; import it.grid.storm.ea.ExtendedAttributesException; import it.grid.storm.ea.ExtendedAttributesFactory; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.model.VirtualFS; /** * @author Michele Dibenedetto @@ -69,7 +69,7 @@ public CheckResponse execute() throws GenericCheckException { CheckStatus status = CheckStatus.SUCCESS; String errorMessage = ""; // load declared file systems from namespace.xml - for (VirtualFSInterface vfs : NamespaceDirector.getNamespace().getAllDefinedVFS()) { + for (VirtualFS vfs : Namespace.getInstance().getAllDefinedVFS()) { 
String fsRootPath = vfs.getRootPath().trim(); if (fsRootPath.charAt(fsRootPath.length() - 1) != File.separatorChar) { fsRootPath += File.separatorChar; diff --git a/src/main/java/it/grid/storm/checksum/ChecksumManager.java b/src/main/java/it/grid/storm/checksum/ChecksumManager.java index 3c2a74832..395423be9 100644 --- a/src/main/java/it/grid/storm/checksum/ChecksumManager.java +++ b/src/main/java/it/grid/storm/checksum/ChecksumManager.java @@ -17,16 +17,17 @@ package it.grid.storm.checksum; -import it.grid.storm.config.DefaultValue; -import it.grid.storm.ea.ExtendedAttributesException; -import it.grid.storm.ea.StormEA; - import java.io.FileNotFoundException; import java.util.Map; +import javax.ws.rs.NotSupportedException; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import it.grid.storm.ea.ExtendedAttributesException; +import it.grid.storm.ea.StormEA; + public class ChecksumManager { private static final Logger log = LoggerFactory.getLogger(ChecksumManager.class); @@ -36,7 +37,7 @@ public class ChecksumManager { private ChecksumManager() { - defaultAlgorithm = DefaultValue.getChecksumAlgorithm(); + defaultAlgorithm = ChecksumAlgorithm.ADLER32; } public static synchronized ChecksumManager getInstance() { diff --git a/src/main/java/it/grid/storm/checksum/ChecksumRuntimeException.java b/src/main/java/it/grid/storm/checksum/ChecksumRuntimeException.java deleted file mode 100644 index 1eff3508b..000000000 --- a/src/main/java/it/grid/storm/checksum/ChecksumRuntimeException.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.checksum; - -public class ChecksumRuntimeException extends RuntimeException { - - private static final long serialVersionUID = -6992922355763921291L; - - public ChecksumRuntimeException() { - } - - public ChecksumRuntimeException(String message) { - - super(message); - } - - public ChecksumRuntimeException(Throwable cause) { - - super(cause); - } - - public ChecksumRuntimeException(String message, Throwable cause) { - - super(message, cause); - } - -} diff --git a/src/main/java/it/grid/storm/common/GUID.java b/src/main/java/it/grid/storm/common/GUID.java index 7971516f3..4bd58f7c1 100644 --- a/src/main/java/it/grid/storm/common/GUID.java +++ b/src/main/java/it/grid/storm/common/GUID.java @@ -36,38 +36,6 @@ public GUID() { buildNewGUID(); } - public GUID(String guidString) { - - int pos = 0; - int count = 0; - - while (pos < guidString.length()) { - guidValue[count] = getByteValue(guidString.substring(pos, pos + 2)); - pos += 2; - count++; - - if (pos == guidString.length()) { - continue; - } - - if (guidString.charAt(pos) == '-') { - pos++; - } - } - } - - /** - * Calculates the byte from a hex string. - * - * @param hex - * A string hex value. - * @return a byte value. - */ - private byte getByteValue(String hex) { - - return (byte) Integer.parseInt(hex, 16); - } - /** * Calculates the hex string from a byte. 
* diff --git a/src/main/java/it/grid/storm/common/HostLookup.java b/src/main/java/it/grid/storm/common/HostLookup.java deleted file mode 100644 index 661f3d5b9..000000000 --- a/src/main/java/it/grid/storm/common/HostLookup.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.common; - -import java.net.InetAddress; -import java.net.UnknownHostException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class HostLookup { - - private static final Logger log = LoggerFactory.getLogger(HostLookup.class); - - public HostLookup() { - - } - - public String lookup(String hostname) throws UnknownHostException { - - InetAddress ia = InetAddress.getByName(hostname); - log.debug("Lookup for hostname: {} resulted in {}", - hostname, - ia.getHostAddress()); - return ia.getHostAddress(); - } - -} diff --git a/src/main/java/it/grid/storm/common/SRMConstants.java b/src/main/java/it/grid/storm/common/SRMConstants.java index 0ad26fd59..205045869 100644 --- a/src/main/java/it/grid/storm/common/SRMConstants.java +++ b/src/main/java/it/grid/storm/common/SRMConstants.java @@ -29,11 +29,6 @@ public class SRMConstants { - /** - * Default parameter for SrmRmdir function. - */ - public static final boolean recursiveFlag = false; - /** * Default Parameter for SrmLS function. 
*/ diff --git a/src/main/java/it/grid/storm/common/types/InvalidEndPointAttributeException.java b/src/main/java/it/grid/storm/common/types/InvalidEndPointAttributeException.java index 617e190f4..1ccd00ca6 100644 --- a/src/main/java/it/grid/storm/common/types/InvalidEndPointAttributeException.java +++ b/src/main/java/it/grid/storm/common/types/InvalidEndPointAttributeException.java @@ -18,9 +18,8 @@ package it.grid.storm.common.types; /** - * This class represents an Exception throw nwhen attempting to create a - * StFNRoot with a null or empty String, or with a String that does not begin a - * /. + * This class represents an Exception throw nwhen attempting to create a StFNRoot with a null or + * empty String, or with a String that does not begin a /. * * @author EGRID ICTP Trieste * @version 1.0 @@ -28,26 +27,29 @@ */ public class InvalidEndPointAttributeException extends Exception { - private boolean nullName; // boolean true if the supplied String is null - private boolean emptyName; // boolean true if the supplied String is empty - private boolean wrong = false; // boolean true if the supplied String does not - // begin with a / + /** + * + */ + private static final long serialVersionUID = 1L; - /** - * Constructor requiring the String that caused the exception to be thrown. - */ - public InvalidEndPointAttributeException(String name) { + private boolean nullName; // boolean true if the supplied String is null + private boolean emptyName; // boolean true if the supplied String is empty + private boolean wrong = false; // boolean true if the supplied String does not begin with a / - this.nullName = (name == null); - this.emptyName = (name.equals("")); - if (!nullName) - wrong = (name.charAt(0) == '/'); - } + /** + * Constructor requiring the String that caused the exception to be thrown. 
+ */ + public InvalidEndPointAttributeException(String name) { - public String toString() { + this.nullName = (name == null); + this.emptyName = (name.equals("")); + if (!nullName) + wrong = (name.charAt(0) == '/'); + } - return "nullName=" + nullName + "; emptyName=" + emptyName - + "; not-beginning-with-/=" + wrong; - } + public String toString() { + + return "nullName=" + nullName + "; emptyName=" + emptyName + "; not-beginning-with-/=" + wrong; + } } diff --git a/src/main/java/it/grid/storm/common/types/InvalidMachineAttributeException.java b/src/main/java/it/grid/storm/common/types/InvalidMachineAttributeException.java index ae4d7179f..5206e25d2 100644 --- a/src/main/java/it/grid/storm/common/types/InvalidMachineAttributeException.java +++ b/src/main/java/it/grid/storm/common/types/InvalidMachineAttributeException.java @@ -18,8 +18,8 @@ package it.grid.storm.common.types; /** - * This class represents an Exception thrown when the String supplied to the - * constructor of Machine is null or empty. + * This class represents an Exception thrown when the String supplied to the constructor of Machine + * is null or empty. * * @author Ezio Corso * @author EGRID - ICTP Trieste @@ -28,21 +28,25 @@ */ public class InvalidMachineAttributeException extends Exception { - private boolean nullName; // boolean representing a null name String - private boolean emptyName; // boolean true if the supplied String is empty + /** + * + */ + private static final long serialVersionUID = 1L; - /** - * Constructor that requires the String that caused the exception to be - * thrown. - */ - public InvalidMachineAttributeException(String name) { + private boolean nullName; // boolean representing a null name String + private boolean emptyName; // boolean true if the supplied String is empty - nullName = name == null; - emptyName = (name.equals("")); - } + /** + * Constructor that requires the String that caused the exception to be thrown. 
+ */ + public InvalidMachineAttributeException(String name) { - public String toString() { + nullName = name == null; + emptyName = (name.equals("")); + } - return "nullName=" + nullName + "; emptyName=" + emptyName; - } + public String toString() { + + return "nullName=" + nullName + "; emptyName=" + emptyName; + } } diff --git a/src/main/java/it/grid/storm/common/types/InvalidPFNAttributeException.java b/src/main/java/it/grid/storm/common/types/InvalidPFNAttributeException.java index e7e445633..68bb6c6db 100644 --- a/src/main/java/it/grid/storm/common/types/InvalidPFNAttributeException.java +++ b/src/main/java/it/grid/storm/common/types/InvalidPFNAttributeException.java @@ -18,9 +18,8 @@ package it.grid.storm.common.types; /** - * This class represents an Exception throw nwhen attempting to create a - * PathName with a null or empty String, or with a String that does not begin a - * /. + * This class represents an Exception throw nwhen attempting to create a PathName with a null or + * empty String, or with a String that does not begin a /. * * @author Ezio Corso * @author EGRID - ICTP Trieste @@ -29,27 +28,30 @@ */ public class InvalidPFNAttributeException extends Exception { - private boolean nullName; // boolean true if the supplied String is null - private boolean emptyName; // boolean true if the supplied String is empty - private boolean wrong = false; // boolean true if the supplied String does not - // begin with a / + /** + * + */ + private static final long serialVersionUID = 1L; - /** - * Constructor requiring the String that caused the exception to be thrown. 
- */ - public InvalidPFNAttributeException(String name) { + private boolean nullName; // boolean true if the supplied String is null + private boolean emptyName; // boolean true if the supplied String is empty + private boolean wrong = false; // boolean true if the supplied String does not begin with a / - this.nullName = (name == null); - this.emptyName = (name.equals("")); - if (!nullName && !emptyName) - this.wrong = (name.charAt(0) != '/'); - } + /** + * Constructor requiring the String that caused the exception to be thrown. + */ + public InvalidPFNAttributeException(String name) { - public String toString() { + this.nullName = (name == null); + this.emptyName = (name.equals("")); + if (!nullName && !emptyName) + this.wrong = (name.charAt(0) != '/'); + } - return "Attempt to create PFN with invalid attributes: nullName=" - + nullName + "; emptyName=" + emptyName + "; not-beginning-with-/=" - + wrong; - } + public String toString() { + + return "Attempt to create PFN with invalid attributes: nullName=" + nullName + "; emptyName=" + + emptyName + "; not-beginning-with-/=" + wrong; + } } diff --git a/src/main/java/it/grid/storm/common/types/InvalidPFNRootAttributeException.java b/src/main/java/it/grid/storm/common/types/InvalidPFNRootAttributeException.java index a5d3dd7b0..cdb367823 100644 --- a/src/main/java/it/grid/storm/common/types/InvalidPFNRootAttributeException.java +++ b/src/main/java/it/grid/storm/common/types/InvalidPFNRootAttributeException.java @@ -18,33 +18,35 @@ package it.grid.storm.common.types; /** - * This class represents an Exception throw nwhen attempting to create a - * PFNRootRoot with a null or empty String, or with a String that does not begin - * a /. + * This class represents an Exception throw nwhen attempting to create a PFNRootRoot with a null or + * empty String, or with a String that does not begin a /. 
* */ public class InvalidPFNRootAttributeException extends Exception { - private boolean nullName; // boolean true if the supplied String is null - private boolean emptyName; // boolean true if the supplied String is empty - private boolean wrong = false; // boolean true if the supplied String does not - // begin with a / + /** + * + */ + private static final long serialVersionUID = 1L; - /** - * Constructor requiring the String that caused the exception to be thrown. - */ - public InvalidPFNRootAttributeException(String name) { + private boolean nullName; // boolean true if the supplied String is null + private boolean emptyName; // boolean true if the supplied String is empty + private boolean wrong = false; // boolean true if the supplied String does not begin with a / - this.nullName = (name == null); - this.emptyName = (name.equals("")); - if (!nullName) - wrong = (name.charAt(0) == '/'); - } + /** + * Constructor requiring the String that caused the exception to be thrown. + */ + public InvalidPFNRootAttributeException(String name) { - public String toString() { + this.nullName = (name == null); + this.emptyName = (name.equals("")); + if (!nullName) + wrong = (name.charAt(0) == '/'); + } - return "nullName=" + nullName + "; emptyName=" + emptyName - + "; not-beginning-with-/=" + wrong; - } + public String toString() { + + return "nullName=" + nullName + "; emptyName=" + emptyName + "; not-beginning-with-/=" + wrong; + } } diff --git a/src/main/java/it/grid/storm/common/types/InvalidPortAttributeException.java b/src/main/java/it/grid/storm/common/types/InvalidPortAttributeException.java index c2b742cab..de8ded989 100644 --- a/src/main/java/it/grid/storm/common/types/InvalidPortAttributeException.java +++ b/src/main/java/it/grid/storm/common/types/InvalidPortAttributeException.java @@ -18,8 +18,8 @@ package it.grid.storm.common.types; /** - * This class represents an exception thrown if a Port is attempted to be built - * with an int <0 or >65535. 
+ * This class represents an exception thrown if a Port is attempted to be built with an int <0 or + * >65535. * * @author Ezio Corso * @author EGRID - ICTP Trieste @@ -28,18 +28,23 @@ */ public class InvalidPortAttributeException extends Exception { - private int port; + /** + * + */ + private static final long serialVersionUID = 1L; - /** - * Constructor requiring the port that caused the exception. - */ - public InvalidPortAttributeException(int port) { + private int port; - this.port = port; - } + /** + * Constructor requiring the port that caused the exception. + */ + public InvalidPortAttributeException(int port) { - public String toString() { + this.port = port; + } - return "Port exceeded limits; supplied port was: " + port; - } + public String toString() { + + return "Port exceeded limits; supplied port was: " + port; + } } diff --git a/src/main/java/it/grid/storm/common/types/InvalidSFNAttributesException.java b/src/main/java/it/grid/storm/common/types/InvalidSFNAttributesException.java index 897fd0263..5d9fc5ebb 100644 --- a/src/main/java/it/grid/storm/common/types/InvalidSFNAttributesException.java +++ b/src/main/java/it/grid/storm/common/types/InvalidSFNAttributesException.java @@ -18,8 +18,8 @@ package it.grid.storm.common.types; /** - * This class represents an exception thrown when the SFN constructor is invoked - * with null Machine, Port or PathName. + * This class represents an exception thrown when the SFN constructor is invoked with null Machine, + * Port or PathName. 
* * @author Ezio Corso * @author EGRID - ICTP Trieste @@ -28,79 +28,82 @@ */ public class InvalidSFNAttributesException extends Exception { - private boolean nullMachine; // boolean true if Machine is null - private boolean nullPort; // boolean true if Port is null - private boolean nullEndPoint; // boolean true if EndPoint is null - private boolean nullStFN; // boolean true if PathName is null - private boolean emptyMachine = false; // boolean indicating if Machine is - // empty - private boolean emptyPort = false; // boolean indicating if Port is empty - private boolean emptyEndPoint = false; // boolean indicating if EndPoint is - // empty - private boolean emptyStFN = false; // boolean indicating if StFN is empty + /** + * + */ + private static final long serialVersionUID = 1L; - private boolean queryForm = false; + private boolean nullMachine; // boolean true if Machine is null + private boolean nullPort; // boolean true if Port is null + private boolean nullEndPoint; // boolean true if EndPoint is null + private boolean nullStFN; // boolean true if PathName is null + private boolean emptyMachine = false; // boolean indicating if Machine is + // empty + private boolean emptyPort = false; // boolean indicating if Port is empty + private boolean emptyEndPoint = false; // boolean indicating if EndPoint is + // empty + private boolean emptyStFN = false; // boolean indicating if StFN is empty - /** - * Constructor that requires the Machine m, the Port p and the PathName pn - * that caused the Exception to be thrown. - */ - public InvalidSFNAttributesException(Machine m, Port p, StFN s) { + private boolean queryForm = false; - nullMachine = (m == null); - if (!nullMachine) - emptyMachine = m.isEmpty(); - nullPort = (p == null); - if (!nullPort) - emptyPort = p.isEmpty(); - nullStFN = (s == null); - if (!nullStFN) - emptyStFN = s.isEmpty(); - } + /** + * Constructor that requires the Machine m, the Port p and the PathName pn that caused the + * Exception to be thrown. 
+ */ + public InvalidSFNAttributesException(Machine m, Port p, StFN s) { - public InvalidSFNAttributesException(Machine m, Port p, EndPoint e, StFN s) { + nullMachine = (m == null); + if (!nullMachine) + emptyMachine = m.isEmpty(); + nullPort = (p == null); + if (!nullPort) + emptyPort = p.isEmpty(); + nullStFN = (s == null); + if (!nullStFN) + emptyStFN = s.isEmpty(); + } - nullMachine = (m == null); - if (!nullMachine) - emptyMachine = m.isEmpty(); - nullPort = (p == null); - if (!nullPort) - emptyPort = p.isEmpty(); - nullEndPoint = (e == null); - if (!nullEndPoint) - emptyEndPoint = e.isEmpty(); - nullStFN = (s == null); - if (!nullStFN) - emptyStFN = s.isEmpty(); - queryForm = true; - } + public InvalidSFNAttributesException(Machine m, Port p, EndPoint e, StFN s) { - /** - * Constructor that makes an InvalidSFNAttributesException with Machine, Port - * and StFN, as though they had been supplied all null. - */ - public InvalidSFNAttributesException() { + nullMachine = (m == null); + if (!nullMachine) + emptyMachine = m.isEmpty(); + nullPort = (p == null); + if (!nullPort) + emptyPort = p.isEmpty(); + nullEndPoint = (e == null); + if (!nullEndPoint) + emptyEndPoint = e.isEmpty(); + nullStFN = (s == null); + if (!nullStFN) + emptyStFN = s.isEmpty(); + queryForm = true; + } - nullMachine = true; - nullPort = true; - nullEndPoint = true; - nullStFN = true; - } + /** + * Constructor that makes an InvalidSFNAttributesException with Machine, Port and StFN, as though + * they had been supplied all null. 
+ */ + public InvalidSFNAttributesException() { - public String toString() { + nullMachine = true; + nullPort = true; + nullEndPoint = true; + nullStFN = true; + } - if (queryForm) { - return "Invalid SFN Attributes: nullMachine=" + nullMachine - + "; nullPort=" + nullPort + "; nullEndPoint=" + nullEndPoint - + "; nullStFN=" + nullStFN + "; emptyMachine=" + emptyMachine - + "; emptyPort=" + emptyPort + "; emptyEndPoint=" + emptyEndPoint - + "; emptyStFN=" + emptyStFN + "."; - } else { - return "Invalid SFN Attributes: nullMachine=" + nullMachine - + "; nullPort=" + nullPort + "; nullStFN=" + nullStFN - + "; emptyMachine=" + emptyMachine + "; emptyPort=" + emptyPort - + "; emptyStFN=" + emptyStFN + "."; - } - } + public String toString() { + + if (queryForm) { + return "Invalid SFN Attributes: nullMachine=" + nullMachine + "; nullPort=" + nullPort + + "; nullEndPoint=" + nullEndPoint + "; nullStFN=" + nullStFN + "; emptyMachine=" + + emptyMachine + "; emptyPort=" + emptyPort + "; emptyEndPoint=" + emptyEndPoint + + "; emptyStFN=" + emptyStFN + "."; + } else { + return "Invalid SFN Attributes: nullMachine=" + nullMachine + "; nullPort=" + nullPort + + "; nullStFN=" + nullStFN + "; emptyMachine=" + emptyMachine + "; emptyPort=" + emptyPort + + "; emptyStFN=" + emptyStFN + "."; + } + } } diff --git a/src/main/java/it/grid/storm/common/types/InvalidStFNAttributeException.java b/src/main/java/it/grid/storm/common/types/InvalidStFNAttributeException.java index 46b7c4b7a..a36fa2e4a 100644 --- a/src/main/java/it/grid/storm/common/types/InvalidStFNAttributeException.java +++ b/src/main/java/it/grid/storm/common/types/InvalidStFNAttributeException.java @@ -18,8 +18,8 @@ package it.grid.storm.common.types; /** - * This class represents an Exception thrown when attempting to create a StFN - * with a null or empty String, or with a String that does not begin a /. 
+ * This class represents an Exception thrown when attempting to create a StFN with a null or empty + * String, or with a String that does not begin a /. * * @author EGRID ICTP - CNAF Bologna * @version 2.0 @@ -27,29 +27,32 @@ */ public class InvalidStFNAttributeException extends Exception { - private boolean nullName; // boolean true if the supplied String is null - private boolean emptyName; // boolean true if the supplied String is empty - private boolean noBeginningSlash = false; // boolean true if the supplied - // String does not begin with a / - private boolean hasDot = false; // boolean true is string contains a . - - /** - * Constructor requiring the String that caused the exception to be thrown. - */ - public InvalidStFNAttributeException(String name) { - - this.nullName = (name == null); - this.emptyName = (name.equals("")); - if (!nullName && !emptyName) { - noBeginningSlash = (name.charAt(0) != '/'); - hasDot = (name.indexOf("..") != -1); - } - } - - public String toString() { - - return "Invalid StFN Attributes: nullName=" + nullName + "; emptyName=" - + emptyName + "; doesn't beginning with slash=" + noBeginningSlash - + "; has dots=" + hasDot; - } + /** + * + */ + private static final long serialVersionUID = 1L; + + private boolean nullName; // boolean true if the supplied String is null + private boolean emptyName; // boolean true if the supplied String is empty + private boolean noBeginningSlash = false; // boolean true if the supplied String does not begin with a / + private boolean hasDot = false; // boolean true is string contains a . + + /** + * Constructor requiring the String that caused the exception to be thrown. 
+ */ + public InvalidStFNAttributeException(String name) { + + this.nullName = (name == null); + this.emptyName = (name.equals("")); + if (!nullName && !emptyName) { + noBeginningSlash = (name.charAt(0) != '/'); + hasDot = (name.indexOf("..") != -1); + } + } + + public String toString() { + + return "Invalid StFN Attributes: nullName=" + nullName + "; emptyName=" + emptyName + + "; doesn't beginning with slash=" + noBeginningSlash + "; has dots=" + hasDot; + } } diff --git a/src/main/java/it/grid/storm/common/types/InvalidStFNRootAttributeException.java b/src/main/java/it/grid/storm/common/types/InvalidStFNRootAttributeException.java index f3467f609..0ba4347c0 100644 --- a/src/main/java/it/grid/storm/common/types/InvalidStFNRootAttributeException.java +++ b/src/main/java/it/grid/storm/common/types/InvalidStFNRootAttributeException.java @@ -18,33 +18,35 @@ package it.grid.storm.common.types; /** - * This class represents an Exception throw nwhen attempting to create a - * StFNRoot with a null or empty String, or with a String that does not begin a - * /. + * This class represents an Exception throw nwhen attempting to create a StFNRoot with a null or + * empty String, or with a String that does not begin a /. * */ public class InvalidStFNRootAttributeException extends Exception { - private boolean nullName; // boolean true if the supplied String is null - private boolean emptyName; // boolean true if the supplied String is empty - private boolean wrong = false; // boolean true if the supplied String does not - // begin with a / + /** + * + */ + private static final long serialVersionUID = 1L; - /** - * Constructor requiring the String that caused the exception to be thrown. 
- */ - public InvalidStFNRootAttributeException(String name) { + private boolean nullName; // boolean true if the supplied String is null + private boolean emptyName; // boolean true if the supplied String is empty + private boolean wrong = false; // boolean true if the supplied String does not begin with a / - this.nullName = (name == null); - this.emptyName = (name.equals("")); - if (!nullName) - wrong = (name.charAt(0) == '/'); - } + /** + * Constructor requiring the String that caused the exception to be thrown. + */ + public InvalidStFNRootAttributeException(String name) { - public String toString() { + this.nullName = (name == null); + this.emptyName = (name.equals("")); + if (!nullName) + wrong = (name.charAt(0) == '/'); + } - return "nullName=" + nullName + "; emptyName=" + emptyName - + "; not-beginning-with-/=" + wrong; - } + public String toString() { + + return "nullName=" + nullName + "; emptyName=" + emptyName + "; not-beginning-with-/=" + wrong; + } } diff --git a/src/main/java/it/grid/storm/common/types/InvalidTFNAttributesException.java b/src/main/java/it/grid/storm/common/types/InvalidTFNAttributesException.java index 31cf2c9cb..99eadfdf9 100644 --- a/src/main/java/it/grid/storm/common/types/InvalidTFNAttributesException.java +++ b/src/main/java/it/grid/storm/common/types/InvalidTFNAttributesException.java @@ -18,8 +18,8 @@ package it.grid.storm.common.types; /** - * This class represents an exception thrown when the TFN constructor is invoked - * with null Machine, Port or PathName, or if any is empty. + * This class represents an exception thrown when the TFN constructor is invoked with null Machine, + * Port or PathName, or if any is empty. 
* * @author EGRID - ICTP Trieste * @date March 26th, 2005 @@ -27,34 +27,39 @@ */ public class InvalidTFNAttributesException extends Exception { - private boolean nullMachine; // boolean true if Machine is null - private boolean nullPort; // boolean true if Port is null - private boolean nullPFN; // boolean true if PathName is null - private boolean emptyMachine = false; // boolean true if Machine is empty - private boolean emptyPort = false; // boolean true if Port is empty - private boolean emptyPFN = false; // boolean true if PFN is empty - - /** - * Constructor that requires the Machine m, the Port p and the PathName pn - * that caused the Exception to be thrown. - */ - public InvalidTFNAttributesException(Machine m, Port p, PFN pfn) { - - nullMachine = (m == null); - nullPort = (p == null); - nullPFN = (pfn == null); - if (!nullMachine) - emptyMachine = m.isEmpty(); - if (!nullPort) - emptyPort = p.isEmpty(); - if (!nullPFN) - emptyPFN = pfn.isEmpty(); - } - - public String toString() { - - return "nullMachine=" + nullMachine + "; emptyMachine=" + emptyMachine - + "; nullPort=" + nullPort + "; emptyPort=" + emptyPort + "; nullPFN=" - + nullPFN + "; emptyPFN=" + emptyPFN + "."; - } + /** + * + */ + private static final long serialVersionUID = 1L; + + private boolean nullMachine; // boolean true if Machine is null + private boolean nullPort; // boolean true if Port is null + private boolean nullPFN; // boolean true if PathName is null + private boolean emptyMachine = false; // boolean true if Machine is empty + private boolean emptyPort = false; // boolean true if Port is empty + private boolean emptyPFN = false; // boolean true if PFN is empty + + /** + * Constructor that requires the Machine m, the Port p and the PathName pn that caused the + * Exception to be thrown. 
+ */ + public InvalidTFNAttributesException(Machine m, Port p, PFN pfn) { + + nullMachine = (m == null); + nullPort = (p == null); + nullPFN = (pfn == null); + if (!nullMachine) + emptyMachine = m.isEmpty(); + if (!nullPort) + emptyPort = p.isEmpty(); + if (!nullPFN) + emptyPFN = pfn.isEmpty(); + } + + public String toString() { + + return "nullMachine=" + nullMachine + "; emptyMachine=" + emptyMachine + "; nullPort=" + + nullPort + "; emptyPort=" + emptyPort + "; nullPFN=" + nullPFN + "; emptyPFN=" + emptyPFN + + "."; + } } diff --git a/src/main/java/it/grid/storm/common/types/PFNRoot.java b/src/main/java/it/grid/storm/common/types/PFNRoot.java deleted file mode 100644 index faa83959a..000000000 --- a/src/main/java/it/grid/storm/common/types/PFNRoot.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.common.types; - -/** - * This class represent a Physical File Name Root, the directory entry in FIle - * System assigned to a Virtual Organization. 
- * - */ - -public class PFNRoot { - - private String pfnroot; - - public PFNRoot(String pfnroot) throws InvalidPFNRootAttributeException { - - if ((pfnroot == null) || (pfnroot.equals("")) || (pfnroot.charAt(0) != '/')) - throw new InvalidPFNRootAttributeException(pfnroot); - this.pfnroot = pfnroot.replaceAll(" ", ""); - - } - - public String getValue() { - - return pfnroot; - } - - public String toString() { - - return pfnroot; - } - - public boolean equals(Object o) { - - if (o == this) - return true; - if (!(o instanceof PFNRoot)) - return false; - PFNRoot po = (PFNRoot) o; - return pfnroot.equals(po.pfnroot); - } - - @Override - public int hashCode() { - - int result = 17; - result = 31 * result + (pfnroot != null ? pfnroot.hashCode() : 0); - return result; - } - -} diff --git a/src/main/java/it/grid/storm/common/types/ParsingSFNAttributesException.java b/src/main/java/it/grid/storm/common/types/ParsingSFNAttributesException.java index 9e479c75a..1c1150ddf 100644 --- a/src/main/java/it/grid/storm/common/types/ParsingSFNAttributesException.java +++ b/src/main/java/it/grid/storm/common/types/ParsingSFNAttributesException.java @@ -18,33 +18,35 @@ package it.grid.storm.common.types; /** - * Class that represents an Exception thrown when making an SFN from a String - * representation. + * Class that represents an Exception thrown when making an SFN from a String representation. * * @author EGRID - ICTP Trieste * @version 1.0 * @date September, 2006 */ -public class ParsingSFNAttributesException extends - InvalidSFNAttributesException { +public class ParsingSFNAttributesException extends InvalidSFNAttributesException { - private String explanation = ""; - private String sfn = ""; + /** + * + */ + private static final long serialVersionUID = 1L; + private String explanation = ""; + private String sfn = ""; - /** - * Constructor that requires the String that caused the exception to be - * thrown, and an explanation String that describes the problem encountered. 
- */ - public ParsingSFNAttributesException(String sfn, String explanation) { + /** + * Constructor that requires the String that caused the exception to be thrown, and an explanation + * String that describes the problem encountered. + */ + public ParsingSFNAttributesException(String sfn, String explanation) { - if ((sfn != null) && (explanation != null)) { - this.sfn = sfn; - this.explanation = explanation; - } - } + if ((sfn != null) && (explanation != null)) { + this.sfn = sfn; + this.explanation = explanation; + } + } - public String toString() { + public String toString() { - return sfn + " is malformed: " + explanation; - } + return sfn + " is malformed: " + explanation; + } } diff --git a/src/main/java/it/grid/storm/common/types/SFN.java b/src/main/java/it/grid/storm/common/types/SFN.java index 08b78ec95..ccc08a441 100644 --- a/src/main/java/it/grid/storm/common/types/SFN.java +++ b/src/main/java/it/grid/storm/common/types/SFN.java @@ -21,9 +21,6 @@ import java.util.Collection; import java.util.Iterator; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * This class represents a SFN, that is a Site File Name. It is used as part of * a SURL. @@ -34,8 +31,6 @@ */ public class SFN { - private static final Logger log = LoggerFactory.getLogger(SFN.class); - private Machine m = null; private Port p = null; private EndPoint ep = null; @@ -117,322 +112,6 @@ public static SFN makeInQueryForm(Machine m, EndPoint ep, StFN stfn) return new SFN(m, Port.makeEmpty(), ep, stfn, false); } - /** - * Static method that returns an SFN from a String representation. If the - * supplied String is null or malformed, an InvalidSFNAttributesException is - * thrown. 
- * - * @param surlString - * a surl string without the protocol schema part - * @return - * @throws ParsingSFNAttributesException - * @throws InvalidSFNAttributesException - */ - public static SFN makeFromString(String surlString) - throws ParsingSFNAttributesException, InvalidSFNAttributesException { - - if (surlString == null) { - throw new ParsingSFNAttributesException(surlString, - "Supplied SFN String was null!"); - } - int colon = surlString.indexOf(":"); // first occurence of : - int slash = surlString.indexOf("/"); // first occurence of / - /* First occurence of ?SFN= */ - int question = surlString.toUpperCase().indexOf("?SFN="); - // TODO MICHELE USER_SURL refactored - if (colon > 0) { - if (question < 0) { - /* - * Supplied string does not contain a colon, and does not contain - * question mark! Treat it as optional port specification, _in_ simple - * form! - */ - if ((slash == -1) || (slash == 0)) { - /* Slash not found or right at the beginning! */ - throw new ParsingSFNAttributesException( - surlString, - "String interpreted as omitting the optional port specification, and as referring to query form;" - + " but the first slash was either not found or right at the beginning!"); - } - return makeFromSimpleFormNoPort(surlString, slash); - } else { - /* - * Supplied string does not contain a colon! Treat it as optional port - * specification, _in_ query form! - */ - if ((slash == -1) || (slash == 0) || (slash > question)) { - /* - * Slash not found or right at the beginning! Or, slash follows - * question! - */ - throw new ParsingSFNAttributesException( - surlString, - "String interpreted as omitting the optional port specification," - + " and as referring to query form; but the first slash was either not found, " - + "or right at the beginning, or only followed the question mark!"); - } - return makeFromQueryFormNoPort(surlString, question, slash); - } - } else { - if (question < 0) { - /* - * Supplied string contains a colon! 
Treat it as if port _is_ specified, - * and _not_ in query form! - */ - - if ((colon == 0) || (colon > slash)) { - /* - * Solon or slash not found or right at the beginning! or, colon - * follows slash! - */ - throw new ParsingSFNAttributesException( - surlString, - "String interpreted as specifying port, and as not referring to query form; " - + "but either the colon is missing, or it follows the first slash!"); - } - return makeFromSimpleForm(surlString, colon, slash); - } else { - /* - * Supplied string contains a port and it also is in query form! - */ - if ((colon == 0) || (colon > slash) || (slash > question)) { - /* - * Colon or slash not found or right at the beginning! Or, colon - * follows slash! Or slash follows question! - */ - throw new ParsingSFNAttributesException( - surlString, - "String interpreted as having the optional port specification, " - + "and as referring to query form; but either colon is missing, " - + "colon follows first slash, or first slash follows question mark!"); - } - return makeFromQueryForm(surlString, colon, slash, question); - } - } - } - - /** - * Returns an SFN from the received string that is supposed to contain the - * port and to be in simple form - * - * @param surlString - * @param colon - * @param slash - * @param question - * @return - * @throws ParsingSFNAttributesException - * @throws InvalidSFNAttributesException - */ - private static SFN makeFromQueryForm(String surlString, int colon, int slash, - int question) throws ParsingSFNAttributesException, - InvalidSFNAttributesException { - - String machineString = surlString.substring(0, colon); - Machine machine = null; - try { - machine = Machine.make(machineString); - } catch (InvalidMachineAttributeException e) { - log.warn("SFN: Unable to build -machine- attribute from {}. {}", - machineString, e.getMessage()); - } - if ((colon + 1) == slash) { - // slash found right after colon! There is no port! 
- throw new ParsingSFNAttributesException( - surlString, - "String interpreted as specifying the optional port, and as referring to query form; but the port number is missing since the first slash was found right after the colon!"); - } - String portString = surlString.substring(colon + 1, slash); - Port port = null; - try { - port = Port.make(Integer.parseInt(portString)); - } catch (Throwable e) { - log.warn("SFN: Unable to build -port- attribute from {}. {}", portString, e.getMessage()); - } - // EndPoint - String endpointString = surlString.substring(slash, question); - EndPoint endpoint = null; - try { - endpoint = EndPoint.make(endpointString); - } catch (InvalidEndPointAttributeException e) { - log.warn("SFN: Unable to build -endpoint- attribute from {}. {}", - endpointString, e.getMessage()); - } - // StFN checks only for a starting / while the rest can be empty! So it is - // sufficient to choose whatever String starts at the /... even just the - // slash itself if that is what is left!!! Should the StFN definition be - // changed??? - if (question + 5 >= surlString.length()) { - throw new ParsingSFNAttributesException( - surlString, - "String interpreted as omitting the optional port specification, and as referring to query form; but theere is nothing left after the question mark!"); // nothing - // left - // after - // question!!! - } - String stfnString = surlString.substring(question + 5, surlString.length()); - StFN stfn = null; - try { - stfn = StFN.make(stfnString); - } catch (InvalidStFNAttributeException e) { - log.warn("SFN: Unable to build -stfn- attribute from {}. 
{}", - stfnString, - e.getMessage()); - } - return SFN.makeInQueryForm(machine, port, endpoint, stfn); - } - - /** - * - * Returns an SFN from the received string that is supposed to contain the - * port and to be in simple form - * - * @param surlString - * @param colon - * @param slash - * @return - * @throws ParsingSFNAttributesException - * @throws InvalidSFNAttributesException - */ - private static SFN makeFromSimpleForm(String surlString, int colon, int slash) - throws ParsingSFNAttributesException, InvalidSFNAttributesException { - - String machineString = surlString.substring(0, colon); - Machine machine = null; - try { - machine = Machine.make(machineString); - } catch (InvalidMachineAttributeException e) { - - log.warn("SFN: Unable to build -machine- attribute from {}. {}", - machineString, e.getMessage()); - } - if ((colon + 1) == slash) { - /* Slash found right after colon! There is no port! */ - throw new ParsingSFNAttributesException(surlString, - "String interpreted as specifying port, and as not referring to query form;" - + " but the actual port number is missing since the first slash is " - + "found right after the colon"); - } - String portString = surlString.substring(colon + 1, slash); - Port port = null; - try { - port = Port.make(Integer.parseInt(portString)); - } catch (Throwable e) { - log.warn("SFN: Unable to build -port- attribute from {}. {}", - portString, e.getMessage()); - } - // StFN checks only for a starting / while the rest can be empty! So it is - // sufficient to choose whatever String starts at the /... even just the - // slash itself if that is what is left!!! Should the StFN definition be - // changed??? - String stfnString = surlString.substring(slash, surlString.length()); - StFN stfn = null; - try { - stfn = StFN.make(stfnString); - } catch (InvalidStFNAttributeException e) { - log.warn("SFN: Unable to build -stfn- attribute from {}. 
{}", - stfnString, - e.getMessage()); - } - return SFN.makeInSimpleForm(machine, port, stfn); - } - - /** - * Returns an SFN from the received string that is supposed to not contain the - * port and to be in query form - * - * @param surlString - * @param slash - * @param question - * @return - * @throws ParsingSFNAttributesException - * @throws InvalidSFNAttributesException - */ - private static SFN makeFromQueryFormNoPort(String surlString, int question, - int slash) throws ParsingSFNAttributesException, - InvalidSFNAttributesException { - - String machine = surlString.substring(0, slash); - Machine machineType = null; - try { - machineType = Machine.make(machine); - } catch (InvalidMachineAttributeException e) { - - log.warn("SFN: Unable to build -machine- attribute from {}. {}", - machine, e.getMessage()); - } - // EndPoint - String endpoint = surlString.substring(slash, question); - EndPoint endpointType = null; - try { - endpointType = EndPoint.make(endpoint); - } catch (InvalidEndPointAttributeException e) { - - log.warn("SFN: Unable to build -endpoint- attribute from {}. {}", - endpoint, e.getMessage()); - } - // StFN checks only for a starting / while the rest can be empty! So it is - // sufficient to choose whatever String starts at the /... even just the - // slash itself if that is what is left!!! Should the StFN definition be - // changed??? - if (question + 5 >= surlString.length()) { - throw new ParsingSFNAttributesException( - surlString, - "String interpreted as omitting the optional port specification, and as referring to query form; but nothing left after the question mark!"); // nothing - // left - // after - // question!!! - } - String stfnString = surlString.substring(question + 5, surlString.length()); - StFN stfn = null; - try { - stfn = StFN.make(stfnString); - } catch (InvalidStFNAttributeException e) { - log.warn("SFN: Unable to build -stfn- attribute from {}. 
{}", - stfnString, - e.getMessage()); - } - return SFN.makeInQueryForm(machineType, endpointType, stfn); - } - - /** - * - * Returns an SFN from the received string that is supposed to not contain the - * port and to be in simple form - * - * @param surlString - * @param slash - * @return - * @throws ParsingSFNAttributesException - * @throws InvalidSFNAttributesException - */ - private static SFN makeFromSimpleFormNoPort(String surlString, int slash) - throws ParsingSFNAttributesException, InvalidSFNAttributesException { - - String machine = surlString.substring(0, slash); - Machine machineType = null; - try { - machineType = Machine.make(machine); - } catch (InvalidMachineAttributeException e) { - log.warn("SFN: Unable to build -machine- attribute from {}. {}", - machine, e.getMessage()); - } - // StFN checks only for a starting / while the rest can be empty! So it - // is sufficient to choose whatever String starts at the /... even just - // the slash itself if that is what is left!!! Should the StFN - // definition be changed??? - String stfnString = surlString.substring(slash, surlString.length()); - StFN stfn = null; - try { - stfn = StFN.make(stfnString); - } catch (InvalidStFNAttributeException e) { - log.warn("SFN: Unable to build -stfn- attribute from {}. {}", - stfnString, - e.getMessage()); - } - return SFN.makeInSimpleForm(machineType, stfn); - - } - /** * Method that returns a Collection of all parent SFNs. The following example * clarifies what is meant by parent SFNs. @@ -526,19 +205,6 @@ public Port port() { return p; } - /** - * Method that returns th EndPoint specified in This SFN. If This is an empty - * SFN, then an Empty EndPoint is returned; likewise if none was specified at - * creation time. - */ - public EndPoint endPoint() { - - if (empty) { - return EndPoint.makeEmpty(); - } - return ep; - } - /** * Method that returns the StFN specified in this SFN. If this is an empty * SFN, then an empty StFN is returned. 
diff --git a/src/main/java/it/grid/storm/common/types/StFN.java b/src/main/java/it/grid/storm/common/types/StFN.java index 08c8554ee..60ced7231 100644 --- a/src/main/java/it/grid/storm/common/types/StFN.java +++ b/src/main/java/it/grid/storm/common/types/StFN.java @@ -33,223 +33,216 @@ */ public class StFN { - private ArrayList name = new ArrayList(); - private boolean directory = false; - - private boolean empty = true; - public static final String PNAME_PATH = "path"; - - private static final String ROOT_STFN = "/"; - - private StFN(ArrayList name, boolean empty, boolean dir) { - - this.name.clear(); - this.name.addAll(name); - this.empty = empty; - this.directory = dir; - } - - /** - * Public static method that returns an empty StFN. - */ - public static StFN makeEmpty() { - - return new StFN(new ArrayList(), true, false); - } - - /** - * Public static method that requires a String representing the pathname of - * the SFN: it cannot be null or empty otherwise an - * InvalidStFNAttributeException is thrown. Likewise if it contains two - * consecutive dots (..). or does not begin with a slash (/). - */ - public static StFN make(String name) throws InvalidStFNAttributeException { - - if (invalid(name)) { - throw new InvalidStFNAttributeException(name); - } - return new StFN(normalize(name), false, checkDirectory(name)); - } - - /** - * Public static method that returns true if the supplied String ends with the - * Separator, thereby indicating a directory. - */ - private static boolean checkDirectory(String path) { - - if (path != null) { - return path.endsWith(NamingConst.SEPARATOR); - } else { - return false; - } - } - - /** - * Private method that returns true if the supplied string is null, or is - * empty, or contains two consecutive dots (..), or does not begin with a - * slash (/). 
- */ - static private boolean invalid(String name) { - - boolean wrong = (name == null) || (name.equals("")) - || (name.charAt(0) != '/'); - return wrong; - } - - /** - * Private method that accepts a valid String as defined by the private valid - * method, and returns an ordered ArrayList of all slash-separated elemets, - * trimmed of leading and trailing white spaces. Multiple consecutive slashes - * are treated as a single slash. Example1: /a/ b /c/d Result: a b c d - * Example2: /////a///b//////////// c/d///////// Result: a b c d Example3: / - * Result: empty ArrayList! - */ - static private ArrayList normalize(String s) { - - // split around slash! - String[] pieces = s.split("/"); - // remove all empty Strings which may have been produced because of - // consecutive slashes! - ArrayList auxList = new ArrayList(); - int pos = 0; - String aux = null; - for (String piece : pieces) { - aux = piece; // get the element - aux = aux.trim(); // remove all leading and trailing white spaces - if (!aux.equals("")) { - auxList.add(pos++, aux); - } - } - return auxList; - } - - /** - * Method that returns a Collection of all parent StFNs, stopping at root - * parent. The following example clarifies what is meant by parent StFNs, and - * by stopping at root parent. Original StFN: - * /EGRID/original/data/nyse/file.txt Parent StFNs: /EGRID/original/data/nyse - * /EGRID/original/data /EGRID/original /EGRID Second example: /file.txt - * Parent StFNs: Empty collection! Third example: /EGRID/ Parent StFNs: Empty - * collection! An empty collection is returned if any error occurs during - * creation of parent StFNs. Likewise if This is an EmptyStFN. - */ - public Collection getParents() { - - Collection aux = new ArrayList(); - if (empty) { - // empty StFN! - return aux; - } - // number of elements in this StFN - int size = name.size(); - if ((size == 0) || (size == 1)) { - // StFN directly on root, or with only _one_ element! 
- return aux; - } - for (int i = 1; i < size; i++) { - // recall sublist goes from 0 inclusive, to i _EXCLUDED_!!! - aux.add(new StFN(new ArrayList(name.subList(0, i)), false, true)); - } - return aux; - } - - /** - * Method that returns the parent StFN. The following example clarifies what - * is meant by parent StFN. Beware of the root of the StFN: the parent is - * calculated from the root! Original StFN: /EGRID/original/data/nyse/file.txt - * Parent StFN: /EGRID/original/data/nyse Second example, Original StFN: - * /file.txt Parent StFN: Empty StFN! Third example: /EGRID/ Parent StFN: - * Empty StFN! An empty StFN is returned if any error occurs during creation - * of parent. Likewise if This is an EmptyStFN. - */ - public StFN getParent() { - - if (empty) { - return makeEmpty(); // empty StFN! - } - int size = name.size(); // number of elements in this StFN - if ((size == 0) || (size == 1)) { - return makeEmpty(); // either directly on root, or only one element! - } - return new StFN(new ArrayList(name.subList(0, size - 1)), false, true); - } - - /** - * Method that returns true if this StFN is empty. 
- */ - public boolean isEmpty() { - - return empty; - } - - public String getValue() { - - return toString(); - } - - @Override - public String toString() { - - if (empty) { - return "Empty StFN"; - } - int size = name.size(); - if (size == 0) { - return ROOT_STFN; - } - StringBuilder sb = new StringBuilder(); - sb.append("/"); - for (Iterator i = name.iterator(); i.hasNext();) { - sb.append(i.next()); - if (i.hasNext()) { - sb.append("/"); - } - } - if (directory) { - sb.append(NamingConst.SEPARATOR); - } - return sb.toString(); - } - - @Override - public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof StFN)) { - return false; - } - StFN po = (StFN) o; - if (po.empty && empty) { - return true; - } - if ((!empty) && (!po.empty) && (name.size() == 0) && (po.name.size() == 0)) { - return true; - } - return (!empty) && (!po.empty) && (directory == po.directory) && name.equals(po.name); - } - - @Override - public int hashCode() { - - if (empty) { - return 0; - } - int hash = 17; - if (name.size() != 0) { - hash = 31 * hash + name.hashCode(); - } - hash = 31 * hash + (directory ? 1 : 0); - return hash; - } - - /** - * Encode StFN for FE communication. - */ - public void encode(Map param, String name) { - - param.put(name, toString()); - } + private ArrayList name = new ArrayList(); + private boolean directory = false; + + private boolean empty = true; + public static final String PNAME_PATH = "path"; + + private static final String ROOT_STFN = "/"; + + private StFN(ArrayList name, boolean empty, boolean dir) { + + this.name.clear(); + this.name.addAll(name); + this.empty = empty; + this.directory = dir; + } + + /** + * Public static method that returns an empty StFN. 
+ */ + public static StFN makeEmpty() { + + return new StFN(new ArrayList(), true, false); + } + + /** + * Public static method that requires a String representing the pathname of the SFN: it cannot be + * null or empty otherwise an InvalidStFNAttributeException is thrown. Likewise if it contains two + * consecutive dots (..). or does not begin with a slash (/). + */ + public static StFN make(String name) throws InvalidStFNAttributeException { + + if (invalid(name)) { + throw new InvalidStFNAttributeException(name); + } + return new StFN(normalize(name), false, checkDirectory(name)); + } + + /** + * Public static method that returns true if the supplied String ends with the Separator, thereby + * indicating a directory. + */ + private static boolean checkDirectory(String path) { + + if (path != null) { + return path.endsWith(NamingConst.SEPARATOR); + } else { + return false; + } + } + + /** + * Private method that returns true if the supplied string is null, or is empty, or contains two + * consecutive dots (..), or does not begin with a slash (/). + */ + static private boolean invalid(String name) { + + boolean wrong = (name == null) || (name.equals("")) || (name.charAt(0) != '/'); + return wrong; + } + + /** + * Private method that accepts a valid String as defined by the private valid method, and returns + * an ordered ArrayList of all slash-separated elemets, trimmed of leading and trailing white + * spaces. Multiple consecutive slashes are treated as a single slash. Example1: /a/ b /c/d + * Result: a b c d Example2: /////a///b//////////// c/d///////// Result: a b c d Example3: / + * Result: empty ArrayList! + */ + static private ArrayList normalize(String s) { + + // split around slash! + String[] pieces = s.split("/"); + // remove all empty Strings which may have been produced because of + // consecutive slashes! 
+ ArrayList auxList = new ArrayList(); + int pos = 0; + String aux = null; + for (String piece : pieces) { + aux = piece; // get the element + aux = aux.trim(); // remove all leading and trailing white spaces + if (!aux.equals("")) { + auxList.add(pos++, aux); + } + } + return auxList; + } + + /** + * Method that returns a Collection of all parent StFNs, stopping at root parent. The following + * example clarifies what is meant by parent StFNs, and by stopping at root parent. Original StFN: + * /EGRID/original/data/nyse/file.txt Parent StFNs: /EGRID/original/data/nyse /EGRID/original/data + * /EGRID/original /EGRID Second example: /file.txt Parent StFNs: Empty collection! Third example: + * /EGRID/ Parent StFNs: Empty collection! An empty collection is returned if any error occurs + * during creation of parent StFNs. Likewise if This is an EmptyStFN. + */ + public Collection getParents() { + + Collection aux = new ArrayList(); + if (empty) { + // empty StFN! + return aux; + } + // number of elements in this StFN + int size = name.size(); + if ((size == 0) || (size == 1)) { + // StFN directly on root, or with only _one_ element! + return aux; + } + for (int i = 1; i < size; i++) { + // recall sublist goes from 0 inclusive, to i _EXCLUDED_!!! + aux.add(new StFN(new ArrayList(name.subList(0, i)), false, true)); + } + return aux; + } + + /** + * Method that returns the parent StFN. The following example clarifies what is meant by parent + * StFN. Beware of the root of the StFN: the parent is calculated from the root! Original StFN: + * /EGRID/original/data/nyse/file.txt Parent StFN: /EGRID/original/data/nyse Second example, + * Original StFN: /file.txt Parent StFN: Empty StFN! Third example: /EGRID/ Parent StFN: Empty + * StFN! An empty StFN is returned if any error occurs during creation of parent. Likewise if This + * is an EmptyStFN. + */ + public StFN getParent() { + + if (empty) { + return makeEmpty(); // empty StFN! 
+ } + int size = name.size(); // number of elements in this StFN + if ((size == 0) || (size == 1)) { + return makeEmpty(); // either directly on root, or only one element! + } + return new StFN(new ArrayList(name.subList(0, size - 1)), false, true); + } + + /** + * Method that returns true if this StFN is empty. + */ + public boolean isEmpty() { + + return empty; + } + + public String getValue() { + + return toString(); + } + + @Override + public String toString() { + + if (empty) { + return "Empty StFN"; + } + int size = name.size(); + if (size == 0) { + return ROOT_STFN; + } + StringBuilder sb = new StringBuilder(); + sb.append("/"); + for (Iterator i = name.iterator(); i.hasNext();) { + sb.append(i.next()); + if (i.hasNext()) { + sb.append("/"); + } + } + if (directory) { + sb.append(NamingConst.SEPARATOR); + } + return sb.toString(); + } + + @Override + public boolean equals(Object o) { + + if (o == this) { + return true; + } + if (!(o instanceof StFN)) { + return false; + } + StFN po = (StFN) o; + if (po.empty && empty) { + return true; + } + if ((!empty) && (!po.empty) && (name.size() == 0) && (po.name.size() == 0)) { + return true; + } + return (!empty) && (!po.empty) && (directory == po.directory) && name.equals(po.name); + } + + @Override + public int hashCode() { + + if (empty) { + return 0; + } + int hash = 17; + if (name.size() != 0) { + hash = 31 * hash + name.hashCode(); + } + hash = 31 * hash + (directory ? 1 : 0); + return hash; + } + + /** + * Encode StFN for FE communication. + */ + public void encode(Map param, String name) { + + param.put(name, toString()); + } } diff --git a/src/main/java/it/grid/storm/common/types/StFNRoot.java b/src/main/java/it/grid/storm/common/types/StFNRoot.java deleted file mode 100644 index 50056eddc..000000000 --- a/src/main/java/it/grid/storm/common/types/StFNRoot.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.common.types; - -/** - * This class represent a Storage File Name Root. A virtual directory path - * assigned to a single Virtual Oraganization, so each SURL of this VO must - * start with correct StFNRoot. - */ -public class StFNRoot { - - private String stfnroot; - - public StFNRoot(String stfnroot) throws InvalidStFNRootAttributeException { - - if ((stfnroot == null) || (stfnroot.equals("")) - || (stfnroot.charAt(0) != '/')) - throw new InvalidStFNRootAttributeException(stfnroot); - this.stfnroot = stfnroot.replaceAll(" ", ""); - } - - public String getValue() { - - return stfnroot; - } - - public String toString() { - - return stfnroot; - } - - public boolean equals(Object o) { - - if (o == this) - return true; - if (!(o instanceof StFNRoot)) - return false; - StFNRoot po = (StFNRoot) o; - return stfnroot.equals(po.stfnroot); - } - - @Override - public int hashCode() { - - int result = 17; - result = 31 * result + (stfnroot != null ? 
stfnroot.hashCode() : 0); - return result; - } - -} diff --git a/src/main/java/it/grid/storm/common/types/TURLPrefix.java b/src/main/java/it/grid/storm/common/types/TURLPrefix.java index 6da17c6ce..39429ef09 100644 --- a/src/main/java/it/grid/storm/common/types/TURLPrefix.java +++ b/src/main/java/it/grid/storm/common/types/TURLPrefix.java @@ -20,6 +20,7 @@ import java.util.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import it.grid.storm.namespace.model.Protocol; /** @@ -74,10 +75,6 @@ public int size() { return desiredProtocols.size(); } - public void print() { - - } - public String toString() { StringBuilder sb = new StringBuilder(); @@ -94,7 +91,7 @@ public String toString() { * @param memberName * @return */ - public static TURLPrefix decode(Map inputParam, String memberName) { + public static TURLPrefix decode(Map inputParam, String memberName) { TURLPrefix decodedTurlPrefix = null; if (inputParam.containsKey(memberName)) { diff --git a/src/main/java/it/grid/storm/common/types/VO.java b/src/main/java/it/grid/storm/common/types/VO.java index 1b34a87c0..122624bea 100644 --- a/src/main/java/it/grid/storm/common/types/VO.java +++ b/src/main/java/it/grid/storm/common/types/VO.java @@ -21,60 +21,65 @@ public class VO implements Serializable { - private String vo; + /** + * + */ + private static final long serialVersionUID = 1L; - public static final VO NO_VO = new VO("NO_VO"); + private String vo; - private VO(String vo) { + public static final VO NO_VO = new VO("NO_VO"); - this.vo = vo; - } + private VO(String vo) { - public static VO make(String newVo) { + this.vo = vo; + } - if (newVo.equals("NO_VO")) - return NO_VO; - else - return new VO(newVo); - } + public static VO make(String newVo) { - public static VO makeDefault() { + if (newVo.equals("NO_VO")) + return NO_VO; + else + return new VO(newVo); + } - return new VO("CNAF"); - } + public static VO makeDefault() { - public static VO makeNoVo() { + return new VO("CNAF"); + } - return NO_VO; - } 
+ public static VO makeNoVo() { - public String getValue() { + return NO_VO; + } - return vo; - } + public String getValue() { - public String toString() { + return vo; + } - return vo; - } + public String toString() { - public boolean equals(Object o) { + return vo; + } - if (!(o instanceof VO)) - return false; - if (o == this) - return true; - VO tmp = (VO) o; + public boolean equals(Object o) { - return (vo.equals(tmp.getValue())); - } + if (!(o instanceof VO)) + return false; + if (o == this) + return true; + VO tmp = (VO) o; - @Override - public int hashCode() { + return (vo.equals(tmp.getValue())); + } - int result = 17; - result = 31 * result + (vo != null ? vo.hashCode() : 0); - return result; - } + @Override + public int hashCode() { + + int result = 17; + result = 31 * result + (vo != null ? vo.hashCode() : 0); + return result; + } } diff --git a/src/main/java/it/grid/storm/concurrency/TimingThreadPool.java b/src/main/java/it/grid/storm/concurrency/TimingThreadPool.java deleted file mode 100644 index f441e4246..000000000 --- a/src/main/java/it/grid/storm/concurrency/TimingThreadPool.java +++ /dev/null @@ -1,69 +0,0 @@ -package it.grid.storm.concurrency; - -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TimingThreadPool extends ThreadPoolExecutor { - - public TimingThreadPool(int corePoolSize, int maximumPoolSize, - long keepAliveTime, TimeUnit unit, BlockingQueue workQueue, - ThreadFactory threadFactory) { - - super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, - threadFactory); - } - - private final ThreadLocal startTime = new ThreadLocal<>(); - private 
static final Logger log = LoggerFactory - .getLogger(TimingThreadPool.class); - private final AtomicLong numTasks = new AtomicLong(); - private final AtomicLong totalTime = new AtomicLong(); - - @Override - protected void beforeExecute(Thread t, Runnable r) { - - super.beforeExecute(t, r); - log.debug("Thread {}: start {}", t, r); - startTime.set(System.nanoTime()); - } - - @Override - protected void afterExecute(Runnable r, Throwable t) { - - try { - long endTime = System.nanoTime(); - long taskTime = endTime - startTime.get(); - startTime.remove(); - numTasks.incrementAndGet(); - totalTime.addAndGet(taskTime); - if (t == null && r instanceof Future) { - try { - Object result = ((Future) r).get(); - log.debug("Thread ended with result: {}", result); - } catch (CancellationException ce) { - t = ce; - } catch (ExecutionException ee) { - t = ee.getCause(); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); // ignore/reset - } - log.debug("Throwable {}. end {}, time={}ns", - t,r,taskTime); - } else { - log.debug("Throwable {}", t); - - } - } finally { - super.afterExecute(r, t); - } - } -} diff --git a/src/main/java/it/grid/storm/config/ConfigReader.java b/src/main/java/it/grid/storm/config/ConfigReader.java deleted file mode 100644 index 25ca3c757..000000000 --- a/src/main/java/it/grid/storm/config/ConfigReader.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.config; - -import static com.google.common.base.Preconditions.checkNotNull; - -import java.util.Iterator; - -import org.apache.commons.configuration.CompositeConfiguration; -import org.apache.commons.configuration.Configuration; -import org.apache.commons.configuration.ConfigurationException; -import org.apache.commons.configuration.PropertiesConfiguration; -import org.apache.commons.configuration.reloading.FileChangedReloadingStrategy; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class ConfigReader { - - private static final Logger log = LoggerFactory.getLogger(ConfigReader.class); - - private Configuration c; - - private String configurationPathname = ""; - - public ConfigReader(String configurationPathname, int refresh) throws ConfigurationException { - - checkNotNull(configurationPathname, "Null configuration pathname."); - int refreshRate = refresh < 0 ? 0 : refresh; - this.configurationPathname = configurationPathname; - log.info("Configuration file {}. 
Refresh rate: {} seconds", configurationPathname, refreshRate); - - FileChangedReloadingStrategy strategy = new FileChangedReloadingStrategy(); - strategy.setRefreshDelay(refreshRate); - PropertiesConfiguration properties = new PropertiesConfiguration(configurationPathname); - logPropertiesConfiguration(properties); - properties.setReloadingStrategy(strategy); - this.c = new CompositeConfiguration(); - ((CompositeConfiguration) this.c).addConfiguration(properties); - log.info("Configuration read successfully."); - } - - private void logPropertiesConfiguration(PropertiesConfiguration properties) { - - log.debug("Configuration properties: "); - String key; - for (Iterator i = properties.getKeys(); i.hasNext();) { - key = (String) i.next(); - log.debug("{} = {}", key, properties.getProperty(key)); - } - } - - /** - * Method that returns the Apache object holding all configuration parameters! - */ - public Configuration getConfiguration() { - - return c; - } - - /** - * Method that returns the directory containing the configuration files: it is extrapolated from - * the complete pathname of the configuration file. If the pathname was not setup, an empty String - * is returned. - */ - public String configurationDirectory() { - - if (configurationPathname.isEmpty()) - return ""; - int lastSlash = this.configurationPathname.lastIndexOf(java.io.File.separator); - if (lastSlash == -1) - return ""; // no slash! 
- return this.configurationPathname.substring(0, lastSlash + 1); - } - -} diff --git a/src/main/java/it/grid/storm/config/Configuration.java b/src/main/java/it/grid/storm/config/Configuration.java index 678f1cc98..bb6ae8ced 100644 --- a/src/main/java/it/grid/storm/config/Configuration.java +++ b/src/main/java/it/grid/storm/config/Configuration.java @@ -17,607 +17,418 @@ package it.grid.storm.config; -import static it.grid.storm.info.du.DiskUsageService.DEFAULT_INITIAL_DELAY; -import static it.grid.storm.info.du.DiskUsageService.DEFAULT_TASKS_INTERVAL; -import static it.grid.storm.info.du.DiskUsageService.DEFAULT_TASKS_PARALLEL; -import static java.lang.System.getProperty; +import static java.io.File.separatorChar; import java.io.File; -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; +import java.io.IOException; import java.util.List; +import java.util.stream.Collectors; -import org.apache.commons.configuration.ConfigurationException; -import org.apache.commons.lang.ArrayUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import com.google.common.collect.Lists; +import com.fasterxml.jackson.databind.JsonMappingException; +import com.fasterxml.jackson.databind.MapperFeature; +import com.fasterxml.jackson.dataformat.javaprop.JavaPropsMapper; -import it.grid.storm.rest.RestServer; -import it.grid.storm.xmlrpc.XMLRPCHttpServer; - -/** - * Singleton holding all configuration values that any other object in the StoRM backend reads from - * configuration files, databases, etc. Implements a 'get' method for each value that - * should be looked up this way. In fact, this is a "read-only" class. 
If no value is specified in - * the configuration medium, a default one is used instead; some properties may hold several comma - * separated values without any white spaces in-between; the name of the property in the - * configuration medium, default values, as well as the option of holding multiple values, is - * specified in each method comment. - */ +import it.grid.storm.config.converter.StormPropertiesConversionException; +import it.grid.storm.config.converter.StormPropertiesConverter; +import it.grid.storm.config.model.v2.SrmEndpoint; +import it.grid.storm.config.model.v2.OverwriteMode; +import it.grid.storm.config.model.v2.QualityLevel; +import it.grid.storm.config.model.v2.StorageType; +import it.grid.storm.config.model.v2.StormProperties; +import it.grid.storm.namespace.model.Authority; public class Configuration { - public static final String DEFAULT_STORM_CONFIG_FILE = - "/etc/storm/backend-server/storm.properties"; - public static final int DEFAULT_STORM_CONFIG_REFRESH_RATE = 0; - - private final ConfigReader cr; - - private static Configuration instance; - - /* System properties */ - public static final String CONFIG_FILE_PATH = "storm.configuration.file"; - public static final String REFRESH_RATE = "storm.configuration.refresh"; - - /* Configuration file properties */ - private static final String MANAGED_SURLS_KEY = "storm.service.SURL.endpoint"; - private static final String MANAGED_SURL_DEFAULT_PORTS_KEY = "storm.service.SURL.default-ports"; - private static final String SERVICE_HOSTNAME_KEY = "storm.service.FE-public.hostname"; - private static final String SERVICE_PORT_KEY = "storm.service.port"; - private static final String LIST_OF_MACHINE_IPS_KEY = "storm.service.FE-list.IPs"; - private static final String DB_URL_HOSTNAME = "storm.service.request-db.host"; - private static final String DB_URL_PROPERTIES = "storm.service.request-db.properties"; - private static final String DB_USER_NAME_KEY = "storm.service.request-db.username"; - private 
static final String DB_PASSWORD_KEY = "storm.service.request-db.passwd"; - private static final String DB_RECONNECT_PERIOD_KEY = "asynch.db.ReconnectPeriod"; - private static final String DB_RECONNECT_DELAY_KEY = "asynch.db.DelayPeriod"; - private static final String CLEANING_INITIAL_DELAY_KEY = "gc.pinnedfiles.cleaning.delay"; - private static final String CLEANING_TIME_INTERVAL_KEY = "gc.pinnedfiles.cleaning.interval"; - private static final String FILE_DEFAULT_SIZE_KEY = "fileSize.default"; - private static final String FILE_LIFETIME_DEFAULT_KEY = "fileLifetime.default"; - private static final String PIN_LIFETIME_DEFAULT_KEY = "pinLifetime.default"; - private static final String PIN_LIFETIME_MAXIMUM_KEY = "pinLifetime.maximum"; - private static final String TRANSIT_INITIAL_DELAY_KEY = "transit.delay"; - private static final String TRANSIT_TIME_INTERVAL_KEY = "transit.interval"; - private static final String PICKING_INITIAL_DELAY_KEY = "asynch.PickingInitialDelay"; - private static final String PICKING_TIME_INTERVAL_KEY = "asynch.PickingTimeInterval"; - private static final String PICKING_MAX_BATCH_SIZE_KEY = "asynch.PickingMaxBatchSize"; - private static final String XMLRPC_MAX_THREAD_KEY = "synchcall.xmlrpc.maxthread"; - private static final String XMLRPC_MAX_QUEUE_SIZE_KEY = "synchcall.xmlrpc.max_queue_size"; - private static final String LIST_OF_DEFAULT_SPACE_TOKEN_KEY = "storm.service.defaultSpaceTokens"; - private static final String COMMAND_SERVER_BINDING_PORT_KEY = "storm.commandserver.port"; - private static final String BE_PERSISTENCE_POOL_DB_MAX_ACTIVE_KEY = - "persistence.internal-db.connection-pool.maxActive"; - private static final String BE_PERSISTENCE_POOL_DB_MAX_WAIT_KEY = - "persistence.internal-db.connection-pool.maxWait"; - private static final String XMLRPC_SERVER_PORT_KEY = "synchcall.xmlrpc.unsecureServerPort"; - private static final String LS_MAX_NUMBER_OF_ENTRY_KEY = "synchcall.directoryManager.maxLsEntry"; - private static final String 
LS_ALL_LEVEL_RECURSIVE_KEY = - "synchcall.directoryManager.default.AllLevelRecursive"; - private static final String LS_NUM_OF_LEVELS_KEY = "synchcall.directoryManager.default.Levels"; - private static final String LS_OFFSET_KEY = "synchcall.directoryManager.default.Offset"; - private static final String PTP_CORE_POOL_SIZE_KEY = - "scheduler.chunksched.ptp.workerCorePoolSize"; - private static final String PTP_MAX_POOL_SIZE_KEY = "scheduler.chunksched.ptp.workerMaxPoolSize"; - private static final String PTP_QUEUE_SIZE_KEY = "scheduler.chunksched.ptp.queueSize"; - private static final String PTG_CORE_POOL_SIZE_KEY = - "scheduler.chunksched.ptg.workerCorePoolSize"; - private static final String PTG_MAX_POOL_SIZE_KEY = "scheduler.chunksched.ptg.workerMaxPoolSize"; - private static final String PTG_QUEUE_SIZE_KEY = "scheduler.chunksched.ptg.queueSize"; - private static final String BOL_CORE_POOL_SIZE_KEY = - "scheduler.chunksched.bol.workerCorePoolSize"; - private static final String BOL_MAX_POOL_SIZE_KEY = "scheduler.chunksched.bol.workerMaxPoolSize"; - private static final String BOL_QUEUE_SIZE_KEY = "scheduler.chunksched.bol.queueSize"; - private static final String CORE_POOL_SIZE_KEY = "scheduler.crusher.workerCorePoolSize"; - private static final String MAX_POOL_SIZE_KEY = "scheduler.crusher.workerMaxPoolSize"; - private static final String QUEUE_SIZE_KEY = "scheduler.crusher.queueSize"; - private static final String NAMESPACE_CONFIG_FILENAME_KEY = "namespace.filename"; - private static final String NAMESPACE_SCHEMA_FILENAME_KEY = "namespace.schema.filename"; - private static final String NAMESPACE_CONFIG_REFRESH_RATE_IN_SECONDS_KEY = - "namespace.refreshrate"; - private static final String NAMESPACE_AUTOMATIC_RELOADING_KEY = - "namespace.automatic-config-reload"; - private static final String GRIDFTP_TIME_OUT_KEY = "asynch.srmcopy.gridftp.timeout"; - private static final String AUTOMATIC_DIRECTORY_CREATION_KEY = "directory.automatic-creation"; - private static 
final String DEFAULT_OVERWRITE_MODE_KEY = "default.overwrite"; - private static final String DEFAULT_FILE_STORAGE_TYPE_KEY = "default.storagetype"; - private static final String PURGE_BATCH_SIZE_KEY = "purge.size"; - private static final String EXPIRED_REQUEST_TIME_KEY = "expired.request.time"; - private static final String EXPIRED_INPROGRESS_PTP_TIME_KEY = "expired.inprogress.time"; - private static final String REQUEST_PURGER_DELAY_KEY = "purge.delay"; - private static final String REQUEST_PURGER_PERIOD_KEY = "purge.interval"; - private static final String EXPIRED_REQUEST_PURGING_KEY = "purging"; - private static final String EXTRA_SLASHES_FOR_FILE_TURL_KEY = "extraslashes.file"; - private static final String EXTRA_SLASHES_FOR_RFIO_TURL_KEY = "extraslashes.rfio"; - private static final String EXTRA_SLASHES_FOR_GSIFTP_TURL_KEY = "extraslashes.gsiftp"; - private static final String EXTRA_SLASHES_FOR_ROOT_TURL_KEY = "extraslashes.root"; - private static final String PING_VALUES_PROPERTIES_FILENAME_KEY = "ping-properties.filename"; - private static final String HEARTHBEAT_PERIOD_KEY = "health.electrocardiogram.period"; - private static final String PERFORMANCE_GLANCE_TIME_INTERVAL_KEY = - "health.performance.glance.timeInterval"; - private static final String PERFORMANCE_LOGBOOK_TIME_INTERVAL_KEY = - "health.performance.logbook.timeInterval"; - private static final String PERFORMANCE_MEASURING_KEY = "health.performance.mesauring.enabled"; - private static final String BOOK_KEEPING_ENABLED_KEY = "health.bookkeeping.enabled"; - private static final String ENABLE_WRITE_PERM_ON_DIRECTORY_KEY = "directory.writeperm"; - private static final String MAX_LOOP_KEY = "abort.maxloop"; - private static final String GRID_USER_MAPPER_CLASSNAME_KEY = "griduser.mapper.classname"; - private static final String AUTHZ_DB_PATH_KEY = "authzdb.path"; - private static final String REFRESH_RATE_AUTHZDB_FILES_IN_SECONDS_KEY = "authzdb.refreshrate"; - private static final String 
RECALL_TABLE_TESTING_MODE_KEY = "tape.recalltable.service.test-mode"; - private static final String REST_SERVICES_PORT_KEY = "storm.rest.services.port"; - private static final String REST_SERVICES_MAX_THREAD = "storm.rest.services.maxthread"; - private static final String REST_SERVICES_MAX_QUEUE_SIZE = "storm.rest.services.max_queue_size"; - private static final String RETRY_VALUE_KEY_KEY = "tape.recalltable.service.param.retry-value"; - private static final String STATUS_KEY_KEY = "tape.recalltable.service.param.status"; - private static final String TASKOVER_KEY_KEY = "tape.recalltable.service.param.takeover"; - private static final String STORM_PROPERTIES_VERSION_KEY = "storm.properties.version"; - private static final String TAPE_SUPPORT_ENABLED_KEY = "tape.support.enabled"; - private static final String SYNCHRONOUS_QUOTA_CHECK_ENABLED_KEY = "info.quota-check.enabled"; - private static final String GPFS_QUOTA_REFRESH_PERIOD_KEY = "info.quota.refresh.period"; - private static final String FAST_BOOTSTRAP_ENABLED_KEY = "bootstrap.fast.enabled"; - private static final String SERVER_POOL_STATUS_CHECK_TIMEOUT_KEY = - "server-pool.status-check.timeout"; - private static final String SANITY_CHECK_ENABLED_KEY = "sanity-check.enabled"; - private static final String XMLRPC_SECURITY_ENABLED_KEY = "synchcall.xmlrpc.security.enabled"; - private static final String XMLRPC_SECURITY_TOKEN_KEY = "synchcall.xmlrpc.security.token"; - private static final String PTG_SKIP_ACL_SETUP = "ptg.skip-acl-setup"; - private static final String HTTP_TURL_PREFIX = "http.turl_prefix"; - private static final String NETWORKADDRESS_CACHE_TTL = "networkaddress.cache.ttl"; - private static final String NETWORKADDRESS_CACHE_NEGATIVE_TTL = - "networkaddress.cache.negative.ttl"; - - public static final String DISKUSAGE_SERVICE_ENABLED = "storm.service.du.enabled"; - private static final String DISKUSAGE_SERVICE_INITIAL_DELAY = "storm.service.du.delaySecs"; - private static final String 
DISKUSAGE_SERVICE_TASKS_INTERVAL = "storm.service.du.periodSecs"; - private static final String DISKUSAGE_SERVICE_TASKS_PARALLEL = "storm.service.du.parallelTasks"; - - static { - try { - instance = new Configuration(); - } catch (ConfigurationException e) { - throw new ExceptionInInitializerError(e); - } + private static Configuration instance = null; + + private static final Logger log = LoggerFactory.getLogger(Configuration.class); + + private File configFile; + private JavaPropsMapper mapper; + private StormProperties properties; + + + public static void init(String filePath) throws IOException { + instance = new Configuration(filePath); } - private Configuration() throws ConfigurationException { + private Configuration(String filePath) throws IOException { + + configFile = new File(filePath); + mapper = new JavaPropsMapper(); + mapper.enable(MapperFeature.ACCEPT_CASE_INSENSITIVE_ENUMS); + loadConfiguration(); + } + + private void loadConfiguration() throws IOException { - String filePath = getProperty(CONFIG_FILE_PATH, DEFAULT_STORM_CONFIG_FILE); - int refreshRate; try { - refreshRate = Integer.valueOf(getProperty(REFRESH_RATE)); - } catch (NumberFormatException e) { - refreshRate = DEFAULT_STORM_CONFIG_REFRESH_RATE; + properties = mapper.readerFor(StormProperties.class).readValue(configFile); + } catch (JsonMappingException e) { + log.error("Malformed configuration file: {}", e.getMessage()); + properties = null; + } + if (properties == null) { + log.warn("It seems that '{}' is not compliant with this StoRM version.", configFile); + File configTarget = new File(configFile + ".new"); + log.info("Converting your configuration into {} ...", configTarget); + try { + StormPropertiesConverter.convert(configFile, configTarget); + } catch (IOException | StormPropertiesConversionException e) { + log.error(e.getMessage()); + throw new RuntimeException("Unable to load configuration!"); + } + log.warn("The automatic convertion has been done."); + log.warn("Pleas check 
the generated configuration and properly update your '{}'", configFile); + log.info("Loading configuration from {} ...", configTarget); + try { + properties = mapper.readerFor(StormProperties.class).readValue(configTarget); + } catch (JsonMappingException e) { + log.error("Malformed configuration file: {}", e.getMessage()); + throw new RuntimeException("Unable to load configuration!"); + } } - cr = new ConfigReader(filePath, refreshRate); + } + + public synchronized static Configuration getInstance() { + + return instance; + } + + public String getVersion() { + + return properties.getVersion(); + } + + public File getConfigurationDir() { + + return configFile.getParentFile(); } /** - * Returns the sole instance of the Configuration class. + * The published host of SRM service. It's used also to initialize a SURL starting from the SFN. */ - public static Configuration getInstance() { + public String getSrmServiceHostname() { + + return getManagedSrmEndpoints().get(0).getServiceHostname(); + } + + public List getSrmEndpoints() { + + return properties.getSrmEndpoints(); + } - return Configuration.instance; + public List getManagedSrmEndpoints() { + + return properties.getSrmEndpoints().stream() + .map(e -> new Authority(e.getHost(), e.getPort())) + .collect(Collectors.toList()); } /** - * Method that returns the directory holding the configuration file. The methods that make use of - * it are uncertain... must be found soon!!! Beware that the configuration directory is implicit - * in the complete pathname to the configuration file supplied in the command line when starting - * StoRM BE. + * The published port of SRM service. It's used also to initialize a SURL starting from the SFN. 
*/ - public String configurationDir() { + public int getSrmServicePort() { - return cr.configurationDirectory(); + return getManagedSrmEndpoints().get(0).getServicePort(); } /** - * getNamespaceConfigPath - * - * @return String + * Get database host */ - public String namespaceConfigPath() { + public String getDbHostname() { - return String.format("%s%setc", getProperty("user.dir"), File.separator); + return properties.getDb().getHostname(); } /** - * MANDATORY CONFIGURATION PARAMETER! Define the SURL end-points. - * - * @return String[] + * Get database URL's sub-name */ - public String[] getManagedSURLs() { + public int getDbPort() { - String[] defaultValue = {"UNDEFINED_SERVICE_ENDPOINT"}; - if (!cr.getConfiguration().containsKey(MANAGED_SURLS_KEY)) { - return defaultValue; - } - return cr.getConfiguration().getStringArray(MANAGED_SURLS_KEY); + return properties.getDb().getPort(); } /** - * @return + * Get database username. */ - public Integer[] getManagedSurlDefaultPorts() { - - Integer[] portsArray; - if (!cr.getConfiguration().containsKey(MANAGED_SURL_DEFAULT_PORTS_KEY)) { - portsArray = new Integer[] {8444}; - } else { - // load from external source - String[] portString = cr.getConfiguration().getStringArray(MANAGED_SURL_DEFAULT_PORTS_KEY); - ArrayList ports = new ArrayList<>(); - for (String port : portString) { - ports.add(Integer.parseInt(port.trim())); - } - portsArray = ports.toArray(new Integer[0]); - } - return portsArray; + public String getDbUsername() { + + return properties.getDb().getUsername(); } /** - * @return String + * Get database password. */ - public String getServiceHostname() { + public String getDbPassword() { - return cr.getConfiguration().getString(SERVICE_HOSTNAME_KEY, "UNDEFINED_STORM_HOSTNAME"); + return properties.getDb().getPassword(); } /** - * Method used by SFN to establish the FE binding port. If no value is found in the configuration - * medium, then the default one is used instead. 
key="storm.service.port"; default value="8444" + * Get database connection properties */ - public int getServicePort() { + public String getDbProperties() { - return cr.getConfiguration().getInt(SERVICE_PORT_KEY, 8444); + return properties.getDb().getProperties(); } /** - * Method used to get a List of Strings of the IPs of the machine hosting the FE for _this_ StoRM - * instance! Used in the xmlrcp server configuration, to allow request coming from the specified - * IP. (Into the xmlrpc server the filter is done by IP, not hostname.) This paramter is mandatory - * when a distribuited FE-BE installation of StoRM is used togheter with a dynamic DNS on the FE - * hostname. In that case the properties storm.machinenames is not enough meaningfull. If no value - * is found in the configuration medium, then the default value is returned instead. - * key="storm.machineIPs"; default value={"127.0.0.1"}; + * Sets the maximum total number of idle and borrows connections that can be active at the same + * time. Use a negative value for no limit. */ - public List getListOfMachineIPs() { + public int getDbPoolSize() { - if (cr.getConfiguration().containsKey(LIST_OF_MACHINE_IPS_KEY)) { + return properties.getDb().getPool().getSize(); + } - String[] names = cr.getConfiguration().getString(LIST_OF_MACHINE_IPS_KEY).split(";"); // split - for (int i = 0; i < names.length; i++) { - names[i] = names[i].trim().toLowerCase(); // for each bit remove - } - return Arrays.asList(names); + /** + * Sets the minimum number of idle connections in the pool. + */ + public int getDbPoolMinIdle() { - } else { - return Arrays.asList("127.0.0.1"); - } + return properties.getDb().getPool().getMinIdle(); } /** - * Method used by all DAO Objects to get the DataBase Driver. If no value is found in the - * configuration medium, then the default value is returned instead. - * key="asynch.picker.db.driver"; default value="com.mysql.cj.jdbc.Driver"; + * Sets the MaxWaitMillis property. 
Use -1 to make the pool wait indefinitely. */ - public String getDBDriver() { + public int getDbPoolMaxWaitMillis() { - return "com.mysql.cj.jdbc.Driver"; + return properties.getDb().getPool().getMaxWaitMillis(); } /** - * Method used by all DAO Objects to get DB URL. If no value is found in the configuration medium, - * then the default value is returned instead. + * This property determines whether or not the pool will validate objects before they are borrowed + * from the pool. */ - public String getStormDbURL() { + public boolean isDbPoolTestOnBorrow() { - String host = getDBHostname(); - String properties = getDBProperties(); - if (properties.isEmpty()) { - return "jdbc:mysql://" + host + "/storm_db"; - } - return "jdbc:mysql://" + host + "/storm_db?" + properties; + return properties.getDb().getPool().isTestOnBorrow(); } /** - * Method used by all DAO Objects to get the DB username. If no value is found in the - * configuration medium, then the default value is returned instead. Default value = "storm"; key - * searched in medium = "asynch.picker.db.username". + * This property determines whether or not the idle object evictor will validate connections. */ - public String getDBUserName() { + public boolean isDbPoolTestWhileIdle() { - return cr.getConfiguration().getString(DB_USER_NAME_KEY, "storm"); + return properties.getDb().getPool().isTestWhileIdle(); } /** - * Method used by all DAO Objects to get the DB password. If no value is found in the - * configuration medium, then the default value is returned instead. Default value = "storm"; key - * searched in medium = "asynch.picker.db.passwd". 
+ * Method used to retrieve the PORT where RESTful services listen (like the Recall Table service) */ - public String getDBPassword() { + public int getRestServicesPort() { - return cr.getConfiguration().getString(DB_PASSWORD_KEY, "storm"); + return properties.getRest().getPort(); } - public String getDBHostname() { + public int getRestServicesMaxThreads() { - return cr.getConfiguration().getString(DB_URL_HOSTNAME, "localhost"); + return properties.getRest().getMaxThreads(); } - /* - * END definition of MANDATORY PROPERTIES - */ + public int getRestServicesMaxQueueSize() { + + return properties.getRest().getMaxQueueSize(); + } - public String getDBProperties() { + public boolean isSanityCheckEnabled() { - return cr.getConfiguration().getString(DB_URL_PROPERTIES, "serverTimezone=UTC&autoReconnect=true"); + return properties.isSanityChecksEnabled(); } /** - * Method used by all DAOs to establish the reconnection period in _seconds_: after such period - * the DB connection will be closed and re-opened. Beware that after such time expires, the - * connection is _not_ automatically closed and reopened; rather, it acts as a flag that is - * considered by the main code and when the most appropriate time comes, the connection is closed - * and reopened. This is because of MySQL bug that does not allow a connection to remain open for - * an arbitrary amount of time! Else an Unexpected EOF Exception gets thrown by the JDBC driver! - * If no value is found in the configuration medium, then the default value is returned instead. - * key="asynch.db.ReconnectPeriod"; default value=18000; Keep in mind that 18000 seconds = 5 - * hours. + * Get max number of XMLRPC threads into for the XMLRPC server. 
*/ - public long getDBReconnectPeriod() { + public int getXmlrpcMaxThreads() { - return cr.getConfiguration().getLong(DB_RECONNECT_PERIOD_KEY, 18000); + return properties.getXmlrpc().getMaxThreads(); } - /** - * Method used by all DAOs to establish the reconnection delay in _seconds_: when StoRM is first - * launched it will wait for this amount of time before starting the timer. This is because of - * MySQL bug that does not allow a connection to remain open for an arbitrary amount of time! Else - * an Unexpected EOF Exception gets thrown by the JDBC driver! If no value is found in the - * configuration medium, then the default value is returned instead. - * key="asynch.db.ReconnectDelay"; default value=30; - */ - public long getDBReconnectDelay() { + public int getXmlrpcMaxQueueSize() { - return cr.getConfiguration().getLong(DB_RECONNECT_DELAY_KEY, 30); + return properties.getXmlrpc().getMaxQueueSize(); } - /** - * Method used by PinnedFilesCatalog to get the initial delay in _seconds_ before starting the - * cleaning thread. If no value is found in the configuration medium, then the default value is - * returned instead. key="pinnedfiles.cleaning.delay"; default value=10; - */ - public long getCleaningInitialDelay() { + public int getXmlRpcServerPort() { - return cr.getConfiguration().getLong(CLEANING_INITIAL_DELAY_KEY, 10); + return properties.getXmlrpc().getPort(); } - /** - * Method used by PinnedFilesCatalog to get the cleaning time interval, in _seconds_. If no value - * is found in the configuration medium, then the default value is returned instead. - * key="pinnedfiles.cleaning.interval"; default value=300; Keep in mind that 300 seconds = 5 - * minutes. 
- */ - public long getCleaningTimeInterval() { + public Boolean isSecurityEnabled() { - return cr.getConfiguration().getLong(CLEANING_TIME_INTERVAL_KEY, 300); + return properties.getSecurity().isEnabled(); } - /** - * Get the default file size - * - * @return - */ - public long getFileDefaultSize() { + public String getSecurityToken() { - return cr.getConfiguration().getLong(FILE_DEFAULT_SIZE_KEY, 1000000); + return properties.getSecurity().getToken(); } - /** - * Method used by VolatileAndJiTCatalog to get the default fileLifetime to use when a volatile - * entry is being added/updated, but the user specified a non positive value. Measured in - * _seconds_. If no value is found in the configuration medium, then the default value is returned - * instead. key="fileLifetime.default"; default value=3600; - */ - public long getFileLifetimeDefault() { + public boolean isDiskUsageServiceEnabled() { - return cr.getConfiguration().getLong(FILE_LIFETIME_DEFAULT_KEY, 3600); + return properties.getDu().isEnabled(); } - /** - * Method used by VolatileAndJiTCatalog to get the minimum pinLifetime allowed, when a jit is - * being added/updated, but the user specified a lower one. This method is also used by the - * PinLifetimeConverter to translate a NULL/0/negative value to a default one. Measured in - * _seconds_. If no value is found in the configuration medium, then the default value is returned - * instead. key="pinLifetime.minimum"; default value=259200; - */ - public long getPinLifetimeDefault() { + public int getDiskUsageServiceInitialDelay() { - return cr.getConfiguration().getLong(PIN_LIFETIME_DEFAULT_KEY, 259200); + return properties.getDu().getInitialDelay(); } - /** - * Method used by VolatileAndJiTCatalog to get the maximum pinLifetime allowed, when a jit is - * being added/updated, but the user specified a higher one. Measured in _seconds_. If no value is - * found in the configuration medium, then the default value is returned instead. 
- * key="pinLifetime.maximum"; default value=1814400 (21 days); - */ - public long getPinLifetimeMaximum() { + public long getDiskUsageServiceTasksInterval() { - return cr.getConfiguration().getLong(PIN_LIFETIME_MAXIMUM_KEY, 1814400); + return properties.getDu().getTasksInterval(); } - /** - * Method used by PtPChunkCatalog to get the initial delay in _seconds_ before starting the - * transiting thread. If no value is found in the configuration medium, then the default value is - * returned instead. key="transit.delay"; default value=10; - */ - public long getTransitInitialDelay() { + public boolean isDiskUsageServiceTasksParallel() { - return cr.getConfiguration().getLong(TRANSIT_INITIAL_DELAY_KEY, 10); + return properties.getDu().isParallelTasksEnabled(); } - /** - * Method used by PtPChunkCatalog to get the transiting time interval, in _seconds_. If no value - * is found in the configuration medium, then the default value is returned instead. - * key="transit.interval"; default value=300; Keep in mind that 300 seconds = 5 minutes. - */ - public long getTransitTimeInterval() { + public String getNamespaceConfigFilename() { + + return "namespace.xml"; + } - return cr.getConfiguration().getLong(TRANSIT_TIME_INTERVAL_KEY, 300); + public String getNamespaceConfigFilePath() { + + String configurationDir = getConfigurationDir().getAbsolutePath(); + if (configurationDir.charAt(configurationDir.length() - 1) != separatorChar) { + configurationDir += Character.toString(separatorChar); + } + return configurationDir + getNamespaceConfigFilename(); } /** - * Method used by AdvancedPicker to get the initial delay before starting to pick data from the - * DB, in _seconds_. If no value is found in the configuration medium, then the default value is - * returned instead. key="asynch.PickingInitialDelay"; default value=1; + * Used by PinnedFilesCatalog to get the initial delay in _seconds_ before starting the cleaning + * thread. 
*/ - public long getPickingInitialDelay() { + public long getExpiredSpacesAgentInitialDelay() { - return cr.getConfiguration().getLong(PICKING_INITIAL_DELAY_KEY, 1); + return properties.getExpiredSpacesAgent().getDelay(); } /** - * Method used by AdvancedPicker to get the time interval of successive pickings, in _seconds_. If - * no value is found in the configuration medium, then the default value is returned instead. - * key="asynch.PickingTimeInterval"; default value=15; + * Used by PinnedFilesCatalog to get the cleaning time interval, in _seconds_. */ - public long getPickingTimeInterval() { + public long getExpiredSpacesAgentInterval() { + + return properties.getExpiredSpacesAgent().getInterval(); + } + + public long getFileDefaultSize() { - return cr.getConfiguration().getLong(PICKING_TIME_INTERVAL_KEY, 2); + return properties.getFiles().getDefaultSize(); } /** - * Method used by RequestSummaryDAO to establish the maximum number of requests to retrieve with - * each polling. If no value is found in the configuration medium, then the default value is - * returned instead. key="asynch.PickingMaxBatchSize"; default value=100; + * Method used by VolatileAndJiTCatalog to get the default fileLifetime to use when a volatile + * entry is being added/updated, but the user specified a non positive value. Measured in + * _seconds_. */ - public int getPickingMaxBatchSize() { + public long getFileLifetimeDefault() { - return cr.getConfiguration().getInt(PICKING_MAX_BATCH_SIZE_KEY, 100); + return properties.getFiles().getDefaultLifetime(); } /** - * Get max number of XMLRPC threads into for the XMLRPC server. + * Method used by VolatileAndJiTCatalog to get the minimum pinLifetime allowed, when a jit is + * being added/updated, but the user specified a lower one. This method is also used by the + * PinLifetimeConverter to translate a NULL/0/negative value to a default one. Measured in + * _seconds_. 
*/ - public int getXMLRPCMaxThread() { + public long getPinLifetimeDefault() { - return cr.getConfiguration() - .getInt(XMLRPC_MAX_THREAD_KEY, XMLRPCHttpServer.DEFAULT_MAX_THREAD_NUM); + return properties.getPinlifetime().getDefaultValue(); } - public int getXMLRPCMaxQueueSize() { + /** + * Method used by VolatileAndJiTCatalog to get the maximum pinLifetime allowed, when a jit is + * being added/updated, but the user specified a higher one. Measured in _seconds_. + */ + public long getPinLifetimeMaximum() { - return cr.getConfiguration() - .getInt(XMLRPC_MAX_QUEUE_SIZE_KEY, XMLRPCHttpServer.DEFAULT_MAX_QUEUE_SIZE); + return properties.getPinlifetime().getMaximum(); } /** - * Get Default Space Tokens - * - * @return + * Method used by PtPChunkCatalog to get the initial delay in _seconds_ before starting the + * transiting thread. */ - public List getListOfDefaultSpaceToken() { + public long getInProgressAgentInitialDelay() { - if (cr.getConfiguration().containsKey(LIST_OF_DEFAULT_SPACE_TOKEN_KEY)) { - - String[] namesArray = cr.getConfiguration().getStringArray(LIST_OF_DEFAULT_SPACE_TOKEN_KEY); - if (namesArray != null) { - return Arrays.asList(namesArray); - } - } - return Lists.newArrayList(); + return properties.getInprogressRequestsAgent().getDelay(); } /** - * Method used by StoRMCommandServer to establish the listening port to which it should bind. If - * no value is found in the configuration medium, then the default value is returned instead. - * key="storm.commandserver.port"; default value=4444; + * Method used by PtPChunkCatalog to get the transiting time interval, in _seconds_. */ - public int getCommandServerBindingPort() { + public long getInProgressAgentInterval() { - return cr.getConfiguration().getInt(COMMAND_SERVER_BINDING_PORT_KEY, 4444); + return properties.getInprogressRequestsAgent().getInterval(); } /** - * Method used in Persistence Component it returns an int indicating the maximum number of active - * connections in the connection pool. 
It is the maximum number of active connections that can be - * allocated from this pool at the same time... 0 (zero) for no limit. If no value is found in the - * configuration medium, then the default value is returned instead. - * key="persistence.db.pool.maxActive"; default value=10; + * Method used by AdvancedPicker to get the initial delay before starting to pick data from the + * DB, in _seconds_. */ - public int getBEPersistencePoolDBMaxActive() { + public long getRequestsPickerAgentInitialDelay() { - return cr.getConfiguration().getInt(BE_PERSISTENCE_POOL_DB_MAX_ACTIVE_KEY, 10); + return properties.getRequestsPickerAgent().getDelay(); } /** - * Method used in Persistence Component it returns an int indicating the maximum waiting time in - * _milliseconds_ for the connection in the pool. It represents the time that the pool will wait - * (when there are no available connections) for a connection to be returned before throwing an - * exception... a value of -1 to wait indefinitely. If no value is found in the configuration - * medium, then the default value is returned instead. key="persistence.db.pool.maxWait"; default - * value=50; + * Method used by AdvancedPicker to get the time interval of successive pickings, in _seconds_. */ - public int getBEPersistencePoolDBMaxWait() { + public long getRequestsPickerAgentInterval() { - return cr.getConfiguration().getInt(BE_PERSISTENCE_POOL_DB_MAX_WAIT_KEY, 50); + return properties.getRequestsPickerAgent().getInterval(); } /** - * Method used by the Synch Component to set the binding port for the _unsecure_ xmlrpc server in - * the BE. If no value is found in the configuration medium, then the default value is returned - * instead. key="synchcall.xmlrpc.unsecureServerPort"; default value=8080; + * Method used by RequestSummaryDAO to establish the maximum number of requests to retrieve with + * each polling. 
*/ - public int getXmlRpcServerPort() { + public int getRequestsPickerAgentMaxFetchedSize() { - return cr.getConfiguration().getInt(XMLRPC_SERVER_PORT_KEY, 8080); + return properties.getRequestsPickerAgent().getMaxFetchedSize(); } /** * Method used by the Synch Component to set the maximum number of entries to return for the srmLs - * functionality. If no value is found in the configuration medium, then the default value is - * returned instead. key="synchcall.directoryManager.maxLsEntry"; default value=500; - * - * @return int + * functionality. */ - public int getLSMaxNumberOfEntry() { + public int getLsMaxNumberOfEntry() { - return cr.getConfiguration().getInt(LS_MAX_NUMBER_OF_ENTRY_KEY, 500); + return properties.getSynchLs().getMaxEntries(); } /** * Default value for the parameter "allLevelRecursive" of the LS request. - * - * @return boolean */ - public boolean getLSallLevelRecursive() { + public boolean isLsDefaultAllLevelRecursive() { - return cr.getConfiguration().getBoolean(LS_ALL_LEVEL_RECURSIVE_KEY, false); + return properties.getSynchLs().isDefaultAllLevelRecursive(); } /** * Default value for the parameter "numOfLevels" of the LS request. - * - * @return int */ - public int getLSnumOfLevels() { + public short getLsDefaultNumOfLevels() { - return cr.getConfiguration().getInt(LS_NUM_OF_LEVELS_KEY, 1); + return properties.getSynchLs().getDefaultNumLevels(); } /** * Default value for the parameter "offset" of the LS request. - * - * @return int */ - public int getLSoffset() { + public short getLsDefaultOffset() { - return cr.getConfiguration().getInt(LS_OFFSET_KEY, 0); + return properties.getSynchLs().getDefaultOffset(); } /** @@ -629,13 +440,11 @@ public int getLSoffset() { * threads are idle. If there are more than corePoolSize but less than maximumPoolSize threads * running, a new thread will be created only if the queue is full. By setting corePoolSize and * maximumPoolSize the same, you create a fixed-size thread pool. 
corePoolSize - the number of - * threads to keep in the pool, even if they are idle. If no value is found in the configuration - * medium, then the default value is returned instead. - * key="scheduler.chunksched.ptp.workerCorePoolSize"; default value=50; + * threads to keep in the pool, even if they are idle. */ public int getPtPCorePoolSize() { - return cr.getConfiguration().getInt(PTP_CORE_POOL_SIZE_KEY, 50); + return properties.getPtpScheduler().getCorePoolSize(); } /** @@ -647,13 +456,11 @@ public int getPtPCorePoolSize() { * threads are idle. If there are more than corePoolSize but less than maximumPoolSize threads * running, a new thread will be created only if the queue is full. By setting corePoolSize and * maximumPoolSize the same, you create a fixed-size thread pool. maxPoolSize - the maximum number - * of threads to allow in the pool. If no value is found in the configuration medium, then the - * default value is returned instead. key="scheduler.chunksched.ptp.workerMaxPoolSize"; default - * value=100; + * of threads to allow in the pool. */ public int getPtPMaxPoolSize() { - return cr.getConfiguration().getInt(PTP_MAX_POOL_SIZE_KEY, 200); + return properties.getPtpScheduler().getMaxPoolSize(); } /** @@ -667,11 +474,10 @@ public int getPtPMaxPoolSize() { * would exceed maxPoolSize, in which case, the task will be rejected. QueueSize - The initial * capacity for this priority queue used for holding tasks before they are executed. The queue * will hold only the Runnable tasks submitted by the execute method. - * key="scheduler.chunksched.ptp.queueSize"; default value=100; */ public int getPtPQueueSize() { - return cr.getConfiguration().getInt(PTP_QUEUE_SIZE_KEY, 1000); + return properties.getPtpScheduler().getQueueSize(); } /** @@ -685,11 +491,10 @@ public int getPtPQueueSize() { * running, a new thread will be created only if the queue is full. By setting corePoolSize and * maximumPoolSize the same, you create a fixed-size thread pool. 
corePoolSize - the number of * threads to keep in the pool, even if they are idle. - * key="scheduler.chunksched.ptg.workerCorePoolSize"; default value=50; */ public int getPtGCorePoolSize() { - return cr.getConfiguration().getInt(PTG_CORE_POOL_SIZE_KEY, 50); + return properties.getPtgScheduler().getCorePoolSize(); } /** @@ -702,12 +507,11 @@ public int getPtGCorePoolSize() { * threads are idle. If there are more than corePoolSize but less than maximumPoolSize threads * running, a new thread will be created only if the queue is full. By setting corePoolSize and * maximumPoolSize the same, you create a fixed-size thread pool. maxPoolSize - the maximum number - * of threads to allow in the pool. key="scheduler.chunksched.ptg.workerMaxPoolSize"; default - * value=200; + * of threads to allow in the pool. */ public int getPtGMaxPoolSize() { - return cr.getConfiguration().getInt(PTG_MAX_POOL_SIZE_KEY, 200); + return properties.getPtgScheduler().getMaxPoolSize(); } /** @@ -721,11 +525,10 @@ public int getPtGMaxPoolSize() { * would exceed maxPoolSize, in which case, the task will be rejected. QueueSize - The initial * capacity for this priority queue used for holding tasks before they are executed. The queue * will hold only the Runnable tasks submitted by the execute method. - * key="scheduler.chunksched.ptg.queueSize"; default value=2000; */ public int getPtGQueueSize() { - return cr.getConfiguration().getInt(PTG_QUEUE_SIZE_KEY, 2000); + return properties.getPtgScheduler().getQueueSize(); } /** @@ -738,11 +541,11 @@ public int getPtGQueueSize() { * more than corePoolSize but less than maximumPoolSize threads running, a new thread will be * created only if the queue is full. By setting corePoolSize and maximumPoolSize the same, you * create a fixed-size thread pool. corePoolSize - the number of threads to keep in the pool, even - * if they are idle. key="scheduler.chunksched.bol.workerCorePoolSize"; default value=50; + * if they are idle. 
*/ public int getBoLCorePoolSize() { - return cr.getConfiguration().getInt(BOL_CORE_POOL_SIZE_KEY, 50); + return properties.getBolScheduler().getCorePoolSize(); } /** @@ -755,11 +558,11 @@ public int getBoLCorePoolSize() { * more than corePoolSize but less than maximumPoolSize threads running, a new thread will be * created only if the queue is full. By setting corePoolSize and maximumPoolSize the same, you * create a fixed-size thread pool. maxPoolSize - the maximum number of threads to allow in the - * pool. key="scheduler.chunksched.bol.workerMaxPoolSize"; default value=200; + * pool. */ public int getBoLMaxPoolSize() { - return cr.getConfiguration().getInt(BOL_MAX_POOL_SIZE_KEY, 200); + return properties.getBolScheduler().getMaxPoolSize(); } /** @@ -772,12 +575,11 @@ public int getBoLMaxPoolSize() { * thread. - If a request cannot be queued, a new thread is created unless this would exceed * maxPoolSize, in which case, the task will be rejected. QueueSize - The initial capacity for * this priority queue used for holding tasks before they are executed. The queue will hold only - * the Runnable tasks submitted by the execute method. key="scheduler.chunksched.bol.queueSize"; - * default value=2000; + * the Runnable tasks submitted by the execute method. */ public int getBoLQueueSize() { - return cr.getConfiguration().getInt(BOL_QUEUE_SIZE_KEY, 2000); + return properties.getBolScheduler().getQueueSize(); } /** @@ -790,16 +592,16 @@ public int getBoLQueueSize() { * corePoolSize but less than maximumPoolSize threads running, a new thread will be created only * if the queue is full. By setting corePoolSize and maximumPoolSize the same, you create a * fixed-size thread pool. corePoolSize - the number of threads to keep in the pool, even if they - * are idle. key="scheduler.crusher.workerCorePoolSize"; default value=10; + * are idle. 
*/ public int getCorePoolSize() { - return cr.getConfiguration().getInt(CORE_POOL_SIZE_KEY, 10); + return properties.getRequestsScheduler().getCorePoolSize(); } /** * Method used by the Scheduler Component to get the QuotaJobResultsHandler Max Pool Size for the - * Crisher. If no value is found in the configuration medium, then the default value is returned + * Crusher. If no value is found in the configuration medium, then the default value is returned * instead. Scheduler component uses a thread pool. Scheduler pool will automatically adjust the * pool size according to the bounds set by corePoolSize and maximumPoolSize. When a new task is * submitted in method execute, and fewer than corePoolSize threads are running, a new thread is @@ -807,11 +609,10 @@ public int getCorePoolSize() { * corePoolSize but less than maximumPoolSize threads running, a new thread will be created only * if the queue is full. By setting corePoolSize and maximumPoolSize the same, you create a * fixed-size thread pool. maxPoolSize - the maximum number of threads to allow in the pool. - * key="scheduler.crusher.workerMaxPoolSize"; default value=50; */ public int getMaxPoolSize() { - return cr.getConfiguration().getInt(MAX_POOL_SIZE_KEY, 50); + return properties.getRequestsScheduler().getMaxPoolSize(); } /** @@ -824,132 +625,82 @@ public int getMaxPoolSize() { * request cannot be queued, a new thread is created unless this would exceed maxPoolSize, in * which case, the task will be rejected. QueueSize - The initial capacity for this priority queue * used for holding tasks before they are executed. The queue will hold only the Runnable tasks - * submitted by the execute method. key="scheduler.crusher.queueSize"; default value=2000; + * submitted by the execute method. 
*/ public int getQueueSize() { - return cr.getConfiguration().getInt(QUEUE_SIZE_KEY, 2000); + return properties.getRequestsScheduler().getQueueSize(); } /** - * getNamespaceConfigFilename - * - * @return String - */ - public String getNamespaceConfigFilename() { - - return cr.getConfiguration().getString(NAMESPACE_CONFIG_FILENAME_KEY, "namespace.xml"); - } - - /** - * Retrieve the namespace schema file name from the first line (attribute) of namespace.xml. - * - * @return String + * Method used by PtPChunk to find out if missing local directories should be created + * automatically or not. SRM 2.2 specification forbids automatic creation. */ - public String getNamespaceSchemaFilename() { + public boolean isAutomaticDirectoryCreationEnabled() { - return cr.getConfiguration().getString(NAMESPACE_SCHEMA_FILENAME_KEY, "Schema UNKNOWN!"); - } - - public int getNamespaceConfigRefreshRateInSeconds() { - - return cr.getConfiguration().getInt(NAMESPACE_CONFIG_REFRESH_RATE_IN_SECONDS_KEY, 3); + return properties.getDirectories().isEnableAutomaticCreation(); } /** - * getNamespaceAutomaticReloading + * Enable write permission on new created directory for LocalAuthorizationSource usage. * - * @return boolean Method used by Namespace Configuration Reloading Strategy (Peeper). If "peeper" - * found namespace.xml config file changed it checks if it can perform an automatic - * reload. If no value is found in the configuration medium, then the default one is used - * instead. key="namespace.automatic-config-reload"; default value=false - */ - public boolean getNamespaceAutomaticReloading() { - - return cr.getConfiguration().getBoolean(NAMESPACE_AUTOMATIC_RELOADING_KEY, false); - } - - /** - * Method used by NaiveGridFTP internal client in srmCopy to establish the time out in - * milliseconds for a reply from the server. If no value is found in the configuration medium, - * then the default one is used instead. 
key="NaiveGridFTP.TimeOut"; default value="15000" - */ - public int getGridFTPTimeOut() { - - return cr.getConfiguration().getInt(GRIDFTP_TIME_OUT_KEY, 15000); - } - - /** - * Method used by PtPChunk to find out if missing local directories should be created - * automatically or not. SRM 2.2 specification forbids automatic creation. If no value is found in - * the configuration medium, then the default one is used instead. - * key="automatic.directory.creation"; default value=false + * @return false by default, otherwise what is specified in the properties */ - public boolean getAutomaticDirectoryCreation() { + public boolean isDirectoryWritePermOnCreationEnabled() { - return cr.getConfiguration().getBoolean(AUTOMATIC_DIRECTORY_CREATION_KEY, false); + return properties.getDirectories().isEnableWritepermOnCreation(); } /** - * Method used by TOverwriteModeConverter to establish the default OverwriteMode to use. If no - * value is found in the configuration medium, then the default one is used instead. - * key="default.overwrite"; default value="N" + * Method used by TOverwriteModeConverter to establish the default OverwriteMode to use. */ - public String getDefaultOverwriteMode() { + public OverwriteMode getDefaultOverwriteMode() { - return cr.getConfiguration().getString(DEFAULT_OVERWRITE_MODE_KEY, "N"); + return OverwriteMode.valueOf(properties.getFiles().getDefaultOverwrite()); } /** - * Method used by FileStorageTypeConverter to establish the default TFileStorageType to use. If no - * value is found in the configuration medium, then the default one is used instead. - * key="default.storagetype"; default value="V" + * Method used by FileStorageTypeConverter to establish the default TFileStorageType to use. 
*/ - public String getDefaultFileStorageType() { + public StorageType getDefaultFileStorageType() { - return cr.getConfiguration().getString(DEFAULT_FILE_STORAGE_TYPE_KEY, "V"); + return StorageType.valueOf(properties.getFiles().getDefaultStoragetype()); } /** - * Method used by RequestSummaryDAO to establish the batch size for removing expired requests. If - * no value is found in the configuration medium, then the default one is used instead. - * key="purge.size"; default value=800 + * Method used by RequestSummaryDAO to establish the batch size for removing expired requests. */ - public int getPurgeBatchSize() { + public int getCompletedRequestsAgentPurgeSize() { - return cr.getConfiguration().getInt(PURGE_BATCH_SIZE_KEY, 800); + return properties.getCompletedRequestsAgent().getPurgeSize(); } /** * Method used by RequestSummaryDAO to establish the time that must be elapsed for considering a * request expired. The time measure specified in the configuration medium is in _days_. The value - * returned by this method, is expressed in _seconds_ If no value is found in the configuration - * medium, then the default one is used instead. key="expired.request.time"; default value=7 (days - * - which correspond to 7 * 24 * 60 * 60 seconds) + * returned by this method, is expressed in _seconds_. */ - public long getExpiredRequestTime() { + public long getCompletedRequestsAgentPurgeAge() { - return cr.getConfiguration().getInt(EXPIRED_REQUEST_TIME_KEY, 604800); + return properties.getCompletedRequestsAgent().getPurgeAge(); } /** * Method used by RequestSummaryCatalog to establish the initial delay before starting the purging - * thread, in _seconds_. If no value is found in the configuration medium, then the default one is - * used instead. key="purge.delay"; default value=10 + * thread, in _seconds_. 
*/ - public int getRequestPurgerDelay() { + public int getCompletedRequestsAgentDelay() { - return cr.getConfiguration().getInt(REQUEST_PURGER_DELAY_KEY, 10); + return properties.getCompletedRequestsAgent().getDelay(); } /** * Method used by RequestSummaryCatalog to establish the time interval in _seconds_ between - * successive purging checks. If no value is found in the configuration medium, then the default - * one is used instead. key="purge.interval"; default value=600 (1o minutes) + * successive purging checks. */ - public int getRequestPurgerPeriod() { + public int getCompletedRequestsAgentPeriod() { - return cr.getConfiguration().getInt(REQUEST_PURGER_PERIOD_KEY, 600); + return properties.getCompletedRequestsAgent().getInterval(); } /** @@ -957,390 +708,148 @@ public int getRequestPurgerPeriod() { * enabled or not. If no value is found in the configuration medium, then the default one is used * instead. key="purging"; default value=true */ - public boolean getExpiredRequestPurging() { + public boolean isCompletedRequestsAgentEnabled() { - return cr.getConfiguration().getBoolean(EXPIRED_REQUEST_PURGING_KEY, true); + return properties.getCompletedRequestsAgent().isEnabled(); + } + + public long getInProgressPtpExpirationTime() { + + return properties.getInprogressRequestsAgent().getPtpExpirationTime(); } /** * Method used by TURLBuilder to adding (in case) extra slashes after the "authority" part of a - * TURL If no value is found in the configuration medium, then the default one is used instead. - * key="extraslashes.file"; default value="" (that is 'file:///) value = "/" ==> file://// + * TURL. 
*/ public String getExtraSlashesForFileTURL() { - return cr.getConfiguration().getString(EXTRA_SLASHES_FOR_FILE_TURL_KEY, ""); + return properties.getExtraslashes().getFile(); } /** * Method used by TURLBuilder to adding (in case) extra slashes after the "authority" part of a - * TURL If no value is found in the configuration medium, then the default one is used instead. - * key="extraslashes.rfio"; default value="" (that is 'rfio://:port')) value - * = "/" ==> 'rfio://:port/' + * TURL. */ public String getExtraSlashesForRFIOTURL() { - return cr.getConfiguration().getString(EXTRA_SLASHES_FOR_RFIO_TURL_KEY, ""); + return properties.getExtraslashes().getRfio(); } /** * Method used by TURLBuilder to adding (in case) extra slashes after the "authority" part of a - * TURL If no value is found in the configuration medium, then the default one is used instead. - * key="extraslashes.gsiftp"; default value="" (that is 'gsiftp://:port')) - * value = "/" ==> 'gsiftp://:port/' + * TURL. */ public String getExtraSlashesForGsiFTPTURL() { - return cr.getConfiguration().getString(EXTRA_SLASHES_FOR_GSIFTP_TURL_KEY, ""); + return properties.getExtraslashes().getGsiftp(); } /** * Method used by TURLBuilder to adding (in case) extra slashes after the "authority" part of a - * TURL If no value is found in the configuration medium, then the default one is used instead. - * key="extraslashes.root"; default value="/" (that is 'root://:port')) - * value = "" ==> 'root://:port' + * TURL. */ - public String getExtraSlashesForROOTTURL() { + public String getExtraSlashesForRootTURL() { - return cr.getConfiguration().getString(EXTRA_SLASHES_FOR_ROOT_TURL_KEY, "/"); + return properties.getExtraslashes().getRoot(); } /** * Method used by Ping Executor to retrieve the Properties File Name where the properties - * are stored. If no value is found in the configuration medium, then the default one - * is used instead. 
key="ping-properties.filename"; default value="" (that is - * 'gsiftp://:port')) value = "/" ==> - * 'gsiftp://:port/' + * are stored. */ public String getPingValuesPropertiesFilename() { - final String KEY = "ping-values.properties"; - return cr.getConfiguration().getString(PING_VALUES_PROPERTIES_FILENAME_KEY, KEY); + return properties.getPingPropertiesFilename(); } - /** - * If no value is found in the configuration medium, then the default one is used instead. - * key="health.electrocardiogram.period"; default value=60 (1 min) - */ public int getHearthbeatPeriod() { - return cr.getConfiguration().getInt(HEARTHBEAT_PERIOD_KEY, 60); - } - - /** - * getPerformanceGlancePeriod - * - * @return int If no value is found in the configuration medium, then the default one is used - * instead. key="health.performance.glance.timeInterval"; default value=15 (15 sec) - */ - public int getPerformanceGlanceTimeInterval() { - - return cr.getConfiguration().getInt(PERFORMANCE_GLANCE_TIME_INTERVAL_KEY, 15); + return properties.getHearthbeat().getPeriod(); } - /** - * getPerformanceGlancePeriod - * - * @return int If no value is found in the configuration medium, then the default one is used - * instead. key="health.performance.logbook.timeInterval"; default value=15 (15 sec) - */ - public int getPerformanceLogbookTimeInterval() { + public int getHearthbeatPerformanceGlanceTimeInterval() { - return cr.getConfiguration().getInt(PERFORMANCE_LOGBOOK_TIME_INTERVAL_KEY, 15); + return properties.getHearthbeat().getPerformanceGlanceTimeInterval(); } - /** - * getPerformanceMeasuring - * - * @return boolean If no value is found in the configuration medium, then the default one is used - * instead. 
key="health.performance.mesauring.enabled"; default value=false - */ - public boolean getPerformanceMeasuring() { + public int getHearthbeatPerformanceLogbookTimeInterval() { - return cr.getConfiguration().getBoolean(PERFORMANCE_MEASURING_KEY, false); + return properties.getHearthbeat().getPerformanceLogbookTimeInterval(); } - /** - * getBookKeppeingEnabled - * - * @return boolean Method used by Namespace Configuration Reloading Strategy (Peeper). If "peeper" - * found namespace.xml config file changed it checks if it can perform an automatic - * reload. If no value is found in the configuration medium, then the default one is used - * instead. key="health.bookkeeping.enabled"; default value=false - */ - public boolean getBookKeepingEnabled() { + public boolean isHearthbeatPerformanceMeasuringEnabled() { - return cr.getConfiguration().getBoolean(BOOK_KEEPING_ENABLED_KEY, false); + return properties.getHearthbeat().isPerformanceMeasuringEnabled(); } - /** - * Enable write permission on new created directory for LocalAuthorizationSource usage. 
- * - * @return false by default, otherwise what is specified in the properties - */ - public boolean getEnableWritePermOnDirectory() { + public boolean isHearthbeatBookkeepingEnabled() { - return cr.getConfiguration().getBoolean(ENABLE_WRITE_PERM_ON_DIRECTORY_KEY, false); + return properties.getHearthbeat().isBookkeepingEnabled(); } public int getMaxLoop() { - return cr.getConfiguration().getInt(MAX_LOOP_KEY, 10); + return properties.getAbortMaxloop(); } - /** - * Method used to retrieve the ClassName for the User Mapper Class If no value is found in the - * configuration medium, then the default one is used instead, that is - * "it.grid.storm.griduser.LcmapsJNAMapper" key="griduser.mapper.classname"; - */ public String getGridUserMapperClassname() { - final String CLASSNAME = "it.grid.storm.griduser.StormLcmapsJNAMapper"; - return cr.getConfiguration().getString(GRID_USER_MAPPER_CLASSNAME_KEY, CLASSNAME); - } - - /** - * Method used to retrieve the default path where the AuthzDB file are stored If no value is found - * in the configuration medium, then the default one is used instead, that is the "configuration - * directory" key="authzdb.path"; - */ - public String getAuthzDBPath() { - - return cr.getConfiguration().getString(AUTHZ_DB_PATH_KEY, cr.configurationDirectory()); + return "it.grid.storm.griduser.StormLcmapsJNAMapper"; } - /** - * Method used to retrieve the default refresh rate of the AuthzDB files If no value is found in - * the configuration medium, then the default one is used instead, that is the "5 sec" - * key="authzdb.refreshrate"; - */ - public int getRefreshRateAuthzDBfilesInSeconds() { - - return cr.getConfiguration().getInt(REFRESH_RATE_AUTHZDB_FILES_IN_SECONDS_KEY, 5); - } - - public boolean getRecallTableTestingMode() { - - return cr.getConfiguration().getBoolean(RECALL_TABLE_TESTING_MODE_KEY, false); - } - - /** - * Method used to retrieve the PORT where RESTful services listen (like the Recall Table service) - * If no value is found in 
the configuration medium, then the default one is used instead, that is - * the "9998" key="tape.recalltable.service.port"; - */ - public int getRestServicesPort() { - - return cr.getConfiguration().getInt(REST_SERVICES_PORT_KEY, 9998); - } - - public int getRestServicesMaxThreads() { - - return cr.getConfiguration().getInt(REST_SERVICES_MAX_THREAD, RestServer.DEFAULT_MAX_THREAD_NUM); - } - - public int getRestServicesMaxQueueSize() { - - return cr.getConfiguration().getInt(REST_SERVICES_MAX_QUEUE_SIZE, RestServer.DEFAULT_MAX_QUEUE_SIZE); - } - - /** - * Method used to retrieve the key string used to pass RETRY-VALUE parameter to Recall Table - * service key="tape.recalltable.service.param.retry-value"; - */ public String getRetryValueKey() { - return cr.getConfiguration().getString(RETRY_VALUE_KEY_KEY, "retry-value"); + return "retry-value"; } - /** - * Method used to retrieve the key string used to pass RETRY-VALUE parameter to Recall Table - * service key="tape.recalltable.service.param.status"; - */ public String getStatusKey() { - return cr.getConfiguration().getString(STATUS_KEY_KEY, "status"); + return "status"; } - /** - * Method used to retrieve the key string used to pass RETRY-VALUE parameter to Recall Table - * service key="tape.recalltable.service.param.takeover"; - */ public String getTaskoverKey() { - return cr.getConfiguration().getString(TASKOVER_KEY_KEY, "first"); - } - - public String getStoRMPropertiesVersion() { - - return cr.getConfiguration().getString(STORM_PROPERTIES_VERSION_KEY, "No version specified"); + return "first"; } - /** - * Flag to support or not the TAPE integration. Default value is false. 
- * - * @return - */ - public boolean getTapeSupportEnabled() { - - return cr.getConfiguration().getBoolean(TAPE_SUPPORT_ENABLED_KEY, false); - } - - /** - * @return - */ - public boolean getSynchronousQuotaCheckEnabled() { - - return cr.getConfiguration().getBoolean(SYNCHRONOUS_QUOTA_CHECK_ENABLED_KEY, false); - } - - /** - * - * @return the refresh period in seconds - */ public int getGPFSQuotaRefreshPeriod() { - return cr.getConfiguration().getInt(GPFS_QUOTA_REFRESH_PERIOD_KEY, 900); + return properties.getInfoQuotaRefreshPeriod(); } - /** - * @return - */ - public boolean getFastBootstrapEnabled() { + public long getServerPoolStatusCheckTimeout() { - return cr.getConfiguration().getBoolean(FAST_BOOTSTRAP_ENABLED_KEY, true); + return properties.getServerPoolStatusCheckTimeout(); } - /** - * @return - */ - public Long getServerPoolStatusCheckTimeout() { - - return cr.getConfiguration().getLong(SERVER_POOL_STATUS_CHECK_TIMEOUT_KEY, 20000); - } - - public boolean getSanityCheckEnabled() { - - return cr.getConfiguration().getBoolean(SANITY_CHECK_ENABLED_KEY, true); - } - - public Boolean getXmlRpcTokenEnabled() { + public boolean isSkipPtgACLSetup() { - return cr.getConfiguration().getBoolean(XMLRPC_SECURITY_ENABLED_KEY, false); - } - - public String getXmlRpcToken() { - - return cr.getConfiguration().getString(XMLRPC_SECURITY_TOKEN_KEY); - } - - public Boolean getPTGSkipACLSetup() { - - return cr.getConfiguration().getBoolean(PTG_SKIP_ACL_SETUP, false); - } - - @Override - public String toString() { - - StringBuilder configurationStringBuilder = new StringBuilder(); - try { - // This class methods - Method[] methods = Configuration.instance.getClass().getDeclaredMethods(); - - // This class fields - Field[] fields = Configuration.instance.getClass().getDeclaredFields(); - HashMap methodKeyMap = new HashMap<>(); - for (Field field : fields) { - String fieldName = field.getName(); - if (fieldName.endsWith("KEY") && field.getType().equals(String.class)) { - // from a 
field like GROUP_TAPE_WRITE_BUFFER_KEY = - // "tape.buffer.group.write" - // puts in the map the pair - // - String mapKey = "get" - + fieldName.substring(0, fieldName.lastIndexOf('_')).replace("_", "").toLowerCase(); - if (methodKeyMap.containsKey(mapKey)) { - String value = methodKeyMap.get(mapKey); - methodKeyMap.put(mapKey, value + " , " + (String) field.get(Configuration.instance)); - } else { - methodKeyMap.put(mapKey, (String) field.get(Configuration.instance)); - } - } - } - - Object field = null; - Object[] dummyArray = new Object[0]; - for (Method method : methods) { - /* - * with method.getModifiers() == 1 we check that the method is public (otherwise he can - * request real parameters) - */ - if (method.getName().substring(0, 3).equals("get") - && (!method.getName().equals("getInstance")) && method.getModifiers() == 1) { - field = method.invoke(Configuration.instance, dummyArray); - if (field.getClass().isArray()) { - field = ArrayUtils.toString(field); - } - String value = methodKeyMap.get(method.getName().toLowerCase()); - if (value == null) { - configurationStringBuilder.insert(0, - "!! Unable to find method " + method.getName() + " in methode key map!"); - } else { - configurationStringBuilder.append("Property " + value + " : "); - } - if (field.getClass().equals(String.class)) { - field = '\'' + ((String) field) + '\''; - } - configurationStringBuilder.append(method.getName() + "() == " + field.toString() + "\n"); - } - } - return configurationStringBuilder.toString(); - } catch (Exception e) { - if (e.getClass().isAssignableFrom(java.lang.reflect.InvocationTargetException.class)) { - configurationStringBuilder.insert(0, - "!!! Cannot do toString! Got an Exception: " + e.getCause() + "\n"); - } else { - configurationStringBuilder.insert(0, - "!!! Cannot do toString! 
Got an Exception: " + e + "\n"); - } - return configurationStringBuilder.toString(); - } + return properties.isSkipPtgAclSetup(); } public String getHTTPTURLPrefix() { - return cr.getConfiguration().getString(HTTP_TURL_PREFIX, "/fileTransfer"); - } - public long getInProgressPutRequestExpirationTime() { - return cr.getConfiguration().getLong(EXPIRED_INPROGRESS_PTP_TIME_KEY, 2592000L); + return properties.getHttpTurlPrefix(); } public int getNetworkAddressCacheTtl() { - return cr.getConfiguration().getInt(NETWORKADDRESS_CACHE_TTL, 0); - } - public int getNetworkAddressCacheNegativeTtl() { - return cr.getConfiguration().getInt(NETWORKADDRESS_CACHE_NEGATIVE_TTL, 0); + return 0; } - public boolean getDiskUsageServiceEnabled() { - - return cr.getConfiguration().getBoolean(DISKUSAGE_SERVICE_ENABLED, false); - } - - public int getDiskUsageServiceInitialDelay() { + public int getNetworkAddressCacheNegativeTtl() { - return cr.getConfiguration().getInt(DISKUSAGE_SERVICE_INITIAL_DELAY, DEFAULT_INITIAL_DELAY); + return 0; } - public int getDiskUsageServiceTasksInterval() { + public String getSiteName() { - // default: 604800 s => 1 week - return cr.getConfiguration().getInt(DISKUSAGE_SERVICE_TASKS_INTERVAL, DEFAULT_TASKS_INTERVAL); + return properties.getSite().getName(); } - public boolean getDiskUsageServiceTasksParallel() { + public QualityLevel getQualityLevel() { - return cr.getConfiguration().getBoolean(DISKUSAGE_SERVICE_TASKS_PARALLEL, DEFAULT_TASKS_PARALLEL); + return properties.getSite().getQualityLevel(); } } diff --git a/src/main/java/it/grid/storm/config/ConfigurationDefaults.java b/src/main/java/it/grid/storm/config/ConfigurationDefaults.java new file mode 100644 index 000000000..5fccf087e --- /dev/null +++ b/src/main/java/it/grid/storm/config/ConfigurationDefaults.java @@ -0,0 +1,128 @@ +package it.grid.storm.config; + +import it.grid.storm.config.model.v2.OverwriteMode; +import it.grid.storm.config.model.v2.QualityLevel; +import 
it.grid.storm.config.model.v2.StorageType; + +public class ConfigurationDefaults { + + /* Endpoint info */ + public static final String DEFAULT_SITENAME = "StoRM site"; + public static final QualityLevel DEFAULT_QUALITY_LEVEL = QualityLevel.PREPRODUCTION; + + /* SRM port for endpoints */ + public static final int DEFAULT_SRM_PORT = 8444; + + /* Database */ + public static final int DB_PORT = 3306; + public static final String DB_USERNAME = "storm"; + public static final String DB_PASSWORD = "storm"; + public static final String DB_PROPERTIES = "serverTimezone=UTC&autoReconnect=true"; + + /* Database connection pool */ + public static final int DB_POOL_SIZE = -1; + public static final int DB_POOL_MIN_IDLE = 10; + public static final int DB_POOL_MAX_WAIT_MILLIS = 5000; + public static final boolean DB_POOL_TEST_ON_BORROW = true; + public static final boolean DB_POOL_TEST_WHILE_IDLE = true; + + /* REST service */ + public static final int REST_SERVICES_PORT = 9998; + public static final int REST_SERVICES_MAX_THREADS = 100; + public static final int REST_SERVICES_MAX_QUEUE_SIZE = 1000; + + /* Sanity check enabled */ + public static final boolean SANITY_CHECK_ENABLED = true; + + /* XMLRPC */ + public static final int XMLRPC_MAX_THREADS = 256; + public static final int XMLRPC_MAX_QUEUE_SIZE = 1000; + public static final int XMLRPC_SERVER_PORT = 8080; + + /* Rest and XMLRPC security settings */ + public static final boolean SECURITY_ENABLED = true; + public static final String SECURITY_TOKEN = "secret"; + + /* Disk usage service */ + public static final boolean DISKUSAGE_SERVICE_ENABLED = false; + public static final int DISKUSAGE_SERVICE_INITIAL_DELAY = 0; + public static final long DISKUSAGE_SERVICE_TASKS_INTERVAL = 604800L; + public static final boolean DISKUSAGE_SERVICE_PARALLEL_TASKS_ENABLED = false; + + /* GC agents */ + public static final int INPROGRESS_REQUESTS_AGENT_DELAY = 50; + public static final int INPROGRESS_REQUESTS_AGENT_INTERVAL = 300; + public static 
final long INPROGRESS_REQUESTS_AGENT_PTP_EXPIRATION_TIME = 2592000L; + + public static final int EXPIRED_SPACES_AGENT_DELAY = 30; + public static final int EXPIRED_SPACES_AGENT_INTERVAL = 300; + + public static final boolean COMPLETED_REQUESTS_AGENT_ENABLED = true; + public static final int COMPLETED_REQUESTS_AGENT_DELAY = 10; + public static final int COMPLETED_REQUESTS_AGENT_INTERVAL = 600; + public static final long COMPLETED_REQUESTS_AGENT_PURGE_AGE = 604800L; + public static final int COMPLETED_REQUESTS_AGENT_PURGE_SIZE = 800; + + /* schedulers */ + public static final int PTP_SCHEDULER_CORE_POOL_SIZE = 50; + public static final int PTP_SCHEDULER_MAX_POOL_SIZE = 200; + public static final int PTP_SCHEDULER_QUEUE_SIZE = 1000; + + public static final int PTG_SCHEDULER_CORE_POOL_SIZE = 50; + public static final int PTG_SCHEDULER_MAX_POOL_SIZE = 200; + public static final int PTG_SCHEDULER_QUEUE_SIZE = 2000; + + public static final int BOL_SCHEDULER_CORE_POOL_SIZE = 50; + public static final int BOL_SCHEDULER_MAX_POOL_SIZE = 200; + public static final int BOL_SCHEDULER_QUEUE_SIZE = 2000; + + public static final int REQUESTS_SCHEDULER_CORE_POOL_SIZE = 10; + public static final int REQUESTS_SCHEDULER_MAX_POOL_SIZE = 50; + public static final int REQUESTS_SCHEDULER_QUEUE_SIZE = 2000; + + public static final int REQUESTS_PICKER_AGENT_DELAY = 1; + public static final int REQUESTS_PICKER_AGENT_INTERVAL = 2; + public static final int REQUESTS_PICKER_AGENT_MAX_FETCHED_SIZE = 100; + + /* advanced */ + + public static final long FILE_DEFAULT_SIZE = 1000000L; + public static final long FILE_LIFETIME_DEFAULT = 259200L; + public static final long PIN_LIFETIME_DEFAULT = 259200L; + public static final long PIN_LIFETIME_MAXIMUM = 1814400L; + + public static final int LS_MAX_NUMBER_OF_ENTRY = 2000; + public static final boolean LS_DEFAULT_ALL_LEVEL_RECURSIVE = false; + public static final short LS_DEFAULT_NUM_OF_LEVELS = 1; + public static final short LS_DEFAULT_OFFSET = 0; + + 
public static final boolean AUTOMATIC_DIRECTORY_CREATION = false; + public static final boolean ENABLE_WRITE_PERM_ON_DIRECTORY = false; + + public static final String DEFAULT_OVERWRITE_MODE = OverwriteMode.N.name(); + public static final String DEFAULT_FILE_STORAGE_TYPE = StorageType.V.name(); + + public static final String EXTRA_SLASHES_FOR_FILE_TURL = ""; + public static final String EXTRA_SLASHES_FOR_RFIO_TURL = ""; + public static final String EXTRA_SLASHES_FOR_GSIFTP_TURL = "/"; + public static final String EXTRA_SLASHES_FOR_ROOT_TURL = "/"; + + public static final String PING_VALUES_PROPERTIES_FILENAME = "ping-values.properties"; + + public static final int HEARTHBEAT_PERIOD = 60; + public static final int PERFORMANCE_GLANCE_TIME_INTERVAL = 15; + public static final int PERFORMANCE_LOGBOOK_TIME_INTERVAL = 15; + public static final boolean PERFORMANCE_MEASURING = false; + public static final boolean BOOK_KEEPING_ENABLED = false; + + public static final int MAX_LOOP = 10; + + public static final int GPFS_QUOTA_REFRESH_PERIOD = 900; + + public static final long SERVER_POOL_STATUS_CHECK_TIMEOUT = 20000; + + public static final boolean PTG_SKIP_ACL_SETUP = false; + + public static final String HTTP_TURL_PREFIX = ""; // was "/fileTransfer" + +} diff --git a/src/main/java/it/grid/storm/config/DefaultValue.java b/src/main/java/it/grid/storm/config/DefaultValue.java deleted file mode 100644 index b065f079d..000000000 --- a/src/main/java/it/grid/storm/config/DefaultValue.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License.
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.config; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import it.grid.storm.checksum.ChecksumAlgorithm; - -public class DefaultValue { - - private static final Logger log = LoggerFactory.getLogger(DefaultValue.class); - - private DefaultValue() { - - } - - /** - * Retrieve default Space Type for anonymous user - */ - public static String getAnonymous_SpaceType() { - - return "volatile"; - } - - /** - * Retrieve default Space Type for named VO - */ - public static String getNamedVO_SpaceType(String voname) { - - String result = null; - if (result == null) { - log.info("Searching for ANONYMOUS parameter value.."); - result = getAnonymous_SpaceType(); - } - return result; - } - - /** - * Retrieve default Total Space Size for anonymous user - */ - public static long getAnonymous_TotalSpaceSize() { - - return 104857600; // 100 Mb - } - - /** - * Retrieve default Total Space Size for named VO - */ - public static long getNamedVO_TotalSpaceSize(String voname) { - - long result = -1; - if (result == -1) { - log.info("Searching for ANONYMOUS parameter value.."); - result = getAnonymous_TotalSpaceSize(); - } - return result; - } - - /** - * Retrieve default Total Space Size for anonymous user - */ - public static long getAnonymous_GuaranteedSpaceSize() { - - return 10485760; // 10 Mb - } - - /** - * Retrieve default Total Space Size for named VO - */ - public static long getNamedVO_GuaranteedSpaceSize(String voname) { - - long result = -1; - if (result == -1) { - log - .info("Searching for ANONYMOUS parameter 
value.."); - result = getAnonymous_GuaranteedSpaceSize(); - } - return result; - } - - /** - * Retrieve default Total Space Life Time for anonymous user - */ - public static long getAnonymous_SpaceLifeTime() { - - return 86400; // 24h - } - - /** - * Retrieve default Space Life Time for named VO - */ - public static long getNamedVO_SpaceLifeTime(String voname) { - - long result = -1; - if (result == -1) { - log.info("Searching for ANONYMOUS parameter value.."); - result = getAnonymous_SpaceLifeTime(); - } - return result; - } - - /** - * Retrieve default Checksum Algorithm - */ - public static ChecksumAlgorithm getChecksumAlgorithm() { - - return ChecksumAlgorithm.ADLER32; - } - -} diff --git a/src/main/java/it/grid/storm/config/converter/StormPropertiesConversionException.java b/src/main/java/it/grid/storm/config/converter/StormPropertiesConversionException.java new file mode 100644 index 000000000..83da9b83b --- /dev/null +++ b/src/main/java/it/grid/storm/config/converter/StormPropertiesConversionException.java @@ -0,0 +1,13 @@ +package it.grid.storm.config.converter; + +public class StormPropertiesConversionException extends Exception { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public StormPropertiesConversionException(String message) { + super(message); + } +} diff --git a/src/main/java/it/grid/storm/config/converter/StormPropertiesConverter.java b/src/main/java/it/grid/storm/config/converter/StormPropertiesConverter.java new file mode 100644 index 000000000..7a864437e --- /dev/null +++ b/src/main/java/it/grid/storm/config/converter/StormPropertiesConverter.java @@ -0,0 +1,452 @@ +package it.grid.storm.config.converter; + +import static it.grid.storm.config.model.v1.StormProperties.AUTOMATIC_DIRECTORY_CREATION_KEY; +import static it.grid.storm.config.model.v1.StormProperties.BOL_CORE_POOL_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.BOL_MAX_POOL_SIZE_KEY; +import static 
it.grid.storm.config.model.v1.StormProperties.BOL_QUEUE_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.BOOK_KEEPING_ENABLED_KEY; +import static it.grid.storm.config.model.v1.StormProperties.CLEANING_INITIAL_DELAY_KEY; +import static it.grid.storm.config.model.v1.StormProperties.CLEANING_TIME_INTERVAL_KEY; +import static it.grid.storm.config.model.v1.StormProperties.CORE_POOL_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.DB_PASSWORD_KEY; +import static it.grid.storm.config.model.v1.StormProperties.DB_URL_HOSTNAME_KEY; +import static it.grid.storm.config.model.v1.StormProperties.DB_URL_PROPERTIES_KEY; +import static it.grid.storm.config.model.v1.StormProperties.DB_USER_NAME_KEY; +import static it.grid.storm.config.model.v1.StormProperties.DEFAULT_FILE_STORAGE_TYPE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.DEFAULT_OVERWRITE_MODE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.DISKUSAGE_SERVICE_ENABLED; +import static it.grid.storm.config.model.v1.StormProperties.DISKUSAGE_SERVICE_INITIAL_DELAY; +import static it.grid.storm.config.model.v1.StormProperties.DISKUSAGE_SERVICE_TASKS_INTERVAL; +import static it.grid.storm.config.model.v1.StormProperties.DISKUSAGE_SERVICE_TASKS_PARALLEL; +import static it.grid.storm.config.model.v1.StormProperties.ENABLE_WRITE_PERM_ON_DIRECTORY_KEY; +import static it.grid.storm.config.model.v1.StormProperties.EXPIRED_INPROGRESS_PTP_TIME_KEY; +import static it.grid.storm.config.model.v1.StormProperties.EXPIRED_REQUEST_PURGING_KEY; +import static it.grid.storm.config.model.v1.StormProperties.EXPIRED_REQUEST_TIME_KEY; +import static it.grid.storm.config.model.v1.StormProperties.EXTRA_SLASHES_FOR_FILE_TURL_KEY; +import static it.grid.storm.config.model.v1.StormProperties.EXTRA_SLASHES_FOR_GSIFTP_TURL_KEY; +import static it.grid.storm.config.model.v1.StormProperties.EXTRA_SLASHES_FOR_RFIO_TURL_KEY; +import static 
it.grid.storm.config.model.v1.StormProperties.EXTRA_SLASHES_FOR_ROOT_TURL_KEY; +import static it.grid.storm.config.model.v1.StormProperties.FILE_DEFAULT_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.FILE_LIFETIME_DEFAULT_KEY; +import static it.grid.storm.config.model.v1.StormProperties.GPFS_QUOTA_REFRESH_PERIOD_KEY; +import static it.grid.storm.config.model.v1.StormProperties.HEARTHBEAT_PERIOD_KEY; +import static it.grid.storm.config.model.v1.StormProperties.HTTP_TURL_PREFIX; +import static it.grid.storm.config.model.v1.StormProperties.LS_ALL_LEVEL_RECURSIVE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.LS_MAX_NUMBER_OF_ENTRY_KEY; +import static it.grid.storm.config.model.v1.StormProperties.LS_NUM_OF_LEVELS_KEY; +import static it.grid.storm.config.model.v1.StormProperties.LS_OFFSET_KEY; +import static it.grid.storm.config.model.v1.StormProperties.MANAGED_SURLS_KEY; +import static it.grid.storm.config.model.v1.StormProperties.MAX_LOOP_KEY; +import static it.grid.storm.config.model.v1.StormProperties.MAX_POOL_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PERFORMANCE_GLANCE_TIME_INTERVAL_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PERFORMANCE_LOGBOOK_TIME_INTERVAL_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PERFORMANCE_MEASURING_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PICKING_INITIAL_DELAY_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PICKING_MAX_BATCH_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PICKING_TIME_INTERVAL_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PING_VALUES_PROPERTIES_FILENAME_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PIN_LIFETIME_DEFAULT_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PIN_LIFETIME_MAXIMUM_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PTG_CORE_POOL_SIZE_KEY; +import 
static it.grid.storm.config.model.v1.StormProperties.PTG_MAX_POOL_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PTG_QUEUE_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PTG_SKIP_ACL_SETUP; +import static it.grid.storm.config.model.v1.StormProperties.PTP_CORE_POOL_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PTP_MAX_POOL_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PTP_QUEUE_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.PURGE_BATCH_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.QUEUE_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.REQUEST_PURGER_DELAY_KEY; +import static it.grid.storm.config.model.v1.StormProperties.REQUEST_PURGER_PERIOD_KEY; +import static it.grid.storm.config.model.v1.StormProperties.REST_SERVICES_MAX_QUEUE_SIZE; +import static it.grid.storm.config.model.v1.StormProperties.REST_SERVICES_MAX_THREAD; +import static it.grid.storm.config.model.v1.StormProperties.REST_SERVICES_PORT_KEY; +import static it.grid.storm.config.model.v1.StormProperties.SANITY_CHECK_ENABLED_KEY; +import static it.grid.storm.config.model.v1.StormProperties.SERVER_POOL_STATUS_CHECK_TIMEOUT_KEY; +import static it.grid.storm.config.model.v1.StormProperties.SERVICE_HOSTNAME_KEY; +import static it.grid.storm.config.model.v1.StormProperties.SERVICE_PORT_KEY; +import static it.grid.storm.config.model.v1.StormProperties.TRANSIT_INITIAL_DELAY_KEY; +import static it.grid.storm.config.model.v1.StormProperties.TRANSIT_TIME_INTERVAL_KEY; +import static it.grid.storm.config.model.v1.StormProperties.XMLRPC_MAX_QUEUE_SIZE_KEY; +import static it.grid.storm.config.model.v1.StormProperties.XMLRPC_MAX_THREAD_KEY; +import static it.grid.storm.config.model.v1.StormProperties.XMLRPC_SECURITY_ENABLED_KEY; +import static it.grid.storm.config.model.v1.StormProperties.XMLRPC_SECURITY_TOKEN_KEY; +import static 
it.grid.storm.config.model.v1.StormProperties.XMLRPC_SERVER_PORT_KEY; +import static java.lang.String.format; +import static java.time.format.DateTimeFormatter.ISO_DATE_TIME; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileWriter; +import java.io.IOException; +import java.time.LocalDateTime; +import java.util.Enumeration; +import java.util.List; +import java.util.Properties; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +import org.apache.commons.collections4.properties.SortedProperties; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + +import it.grid.storm.config.model.v2.StormProperties; + +public class StormPropertiesConverter { + + private static final Logger log = LoggerFactory.getLogger(StormPropertiesConverter.class); + + private static final List MANDATORY_KEYS = + Lists.newArrayList(SERVICE_HOSTNAME_KEY, SERVICE_PORT_KEY, MANAGED_SURLS_KEY); + + public static void convert(File source, File target) + throws IOException, StormPropertiesConversionException { + + Properties old = new Properties(); + old.load(new FileInputStream(source)); + + List missingProps = + MANDATORY_KEYS.stream().filter(k -> !old.containsKey(k)).collect(Collectors.toList()); + if (missingProps.size() > 0) { + String message = format("Missing mandatory properties '%s' for conversion", missingProps); + log.error(message); + throw new StormPropertiesConversionException(message); + } + + SortedProperties properties = new SortedProperties(); + + // version + properties.setProperty("version", StormProperties.VERSION); + + // srmEndpoints + String publicHost = old.getProperty(SERVICE_HOSTNAME_KEY).trim(); + String publicPort = old.getProperty(SERVICE_PORT_KEY).trim(); + properties.setProperty("srm_endpoints[0].host", publicHost); + properties.setProperty("srm_endpoints[0].port", publicPort); + int i = 1; + for (String surl : 
old.getProperty(MANAGED_SURLS_KEY).trim().split(",")) { + Pattern pattern = Pattern.compile("srm://(.*?)/.*"); + Matcher matcher = pattern.matcher(surl); + if (matcher.find()) { + String host = matcher.group(1).split(":")[0]; + String port = matcher.group(1).split(":")[1]; + if (!publicHost.equals(host)) { + properties.setProperty("srm_endpoints[" + i + "].host", host); + properties.setProperty("srm_endpoints[" + i + "].port", port); + i += 1; + } + } + } + + // db + if (old.containsKey(DB_URL_HOSTNAME_KEY)) { + properties.setProperty("db.hostname", old.getProperty(DB_URL_HOSTNAME_KEY).trim()); + } + if (old.containsKey(DB_USER_NAME_KEY)) { + properties.setProperty("db.username", old.getProperty(DB_USER_NAME_KEY).trim()); + } + if (old.containsKey(DB_PASSWORD_KEY)) { + properties.setProperty("db.password", old.getProperty(DB_PASSWORD_KEY).trim()); + } + if (old.containsKey(DB_URL_PROPERTIES_KEY)) { + properties.setProperty("db.properties", old.getProperty(DB_URL_PROPERTIES_KEY).trim()); + } + + // xmlrpc + if (old.containsKey(XMLRPC_SERVER_PORT_KEY)) { + properties.setProperty("xmlrpc.port", old.getProperty(XMLRPC_SERVER_PORT_KEY).trim()); + } + if (old.containsKey(XMLRPC_MAX_THREAD_KEY)) { + properties.setProperty("xmlrpc.max_threads", old.getProperty(XMLRPC_MAX_THREAD_KEY).trim()); + } + if (old.containsKey(XMLRPC_MAX_QUEUE_SIZE_KEY)) { + properties.setProperty("xmlrpc.max_queue_size", + old.getProperty(XMLRPC_MAX_QUEUE_SIZE_KEY).trim()); + } + + // rest + if (old.containsKey(REST_SERVICES_PORT_KEY)) { + properties.setProperty("rest.port", old.getProperty(REST_SERVICES_PORT_KEY).trim()); + } + if (old.containsKey(REST_SERVICES_MAX_THREAD)) { + properties.setProperty("rest.max_threads", old.getProperty(REST_SERVICES_MAX_THREAD).trim()); + } + if (old.containsKey(REST_SERVICES_MAX_QUEUE_SIZE)) { + properties.setProperty("rest.max_queue_size", + old.getProperty(REST_SERVICES_MAX_QUEUE_SIZE).trim()); + } + + // security + if 
(old.containsKey(XMLRPC_SECURITY_ENABLED_KEY)) { + properties.setProperty("security.enabled", old.getProperty(XMLRPC_SECURITY_ENABLED_KEY)); + } + if (old.containsKey(XMLRPC_SECURITY_TOKEN_KEY)) { + properties.setProperty("security.token", old.getProperty(XMLRPC_SECURITY_TOKEN_KEY).trim()); + } + + // du + if (old.containsKey(DISKUSAGE_SERVICE_ENABLED)) { + properties.setProperty("du.enabled", old.getProperty(DISKUSAGE_SERVICE_ENABLED)); + } + if (old.containsKey(DISKUSAGE_SERVICE_INITIAL_DELAY)) { + properties.setProperty("du.initial_delay", old.getProperty(DISKUSAGE_SERVICE_INITIAL_DELAY)); + } + if (old.containsKey(DISKUSAGE_SERVICE_TASKS_PARALLEL)) { + properties.setProperty("du.parallel_tasks_enabled", + old.getProperty(DISKUSAGE_SERVICE_TASKS_PARALLEL)); + } + if (old.containsKey(DISKUSAGE_SERVICE_TASKS_INTERVAL)) { + properties.setProperty("du.tasks_interval", + old.getProperty(DISKUSAGE_SERVICE_TASKS_INTERVAL)); + } + + // sanity check + if (old.containsKey(SANITY_CHECK_ENABLED_KEY)) { + properties.setProperty("sanity_checks_enabled", old.getProperty(SANITY_CHECK_ENABLED_KEY)); + } + + // ls + if (old.containsKey(LS_MAX_NUMBER_OF_ENTRY_KEY)) { + properties.setProperty("synch_ls.max_entries", old.getProperty(LS_MAX_NUMBER_OF_ENTRY_KEY)); + } + if (old.containsKey(LS_ALL_LEVEL_RECURSIVE_KEY)) { + properties.setProperty("synch_ls.default_all_level_recursive", + old.getProperty(LS_ALL_LEVEL_RECURSIVE_KEY)); + } + if (old.containsKey(LS_NUM_OF_LEVELS_KEY)) { + properties.setProperty("synch_ls.default_num_levels", old.getProperty(LS_NUM_OF_LEVELS_KEY)); + } + if (old.containsKey(LS_OFFSET_KEY)) { + properties.setProperty("synch_ls.default_offset", old.getProperty(LS_OFFSET_KEY)); + } + + // directories + if (old.containsKey(AUTOMATIC_DIRECTORY_CREATION_KEY)) { + properties.setProperty("directories.enable_automatic_creation", + old.getProperty(AUTOMATIC_DIRECTORY_CREATION_KEY)); + } + if (old.containsKey(ENABLE_WRITE_PERM_ON_DIRECTORY_KEY)) { + 
properties.setProperty("directories.enable_writeperm_on_creation", + old.getProperty(ENABLE_WRITE_PERM_ON_DIRECTORY_KEY)); + } + + // files + if (old.containsKey(FILE_DEFAULT_SIZE_KEY)) { + properties.setProperty("files.default_size", old.getProperty(FILE_DEFAULT_SIZE_KEY)); + } + if (old.containsKey(FILE_LIFETIME_DEFAULT_KEY)) { + properties.setProperty("files.default_lifetime", old.getProperty(FILE_LIFETIME_DEFAULT_KEY)); + } + if (old.containsKey(DEFAULT_OVERWRITE_MODE_KEY)) { + properties.setProperty("files.default_overwrite", + old.getProperty(DEFAULT_OVERWRITE_MODE_KEY).trim()); + } + if (old.containsKey(DEFAULT_FILE_STORAGE_TYPE_KEY)) { + properties.setProperty("files.default_storagetype", + old.getProperty(DEFAULT_FILE_STORAGE_TYPE_KEY).trim()); + } + + // extraslashes + if (old.containsKey(EXTRA_SLASHES_FOR_FILE_TURL_KEY)) { + properties.setProperty("extraslashes.file", + old.getProperty(EXTRA_SLASHES_FOR_FILE_TURL_KEY).trim()); + } + if (old.containsKey(EXTRA_SLASHES_FOR_RFIO_TURL_KEY)) { + properties.setProperty("extraslashes.rfio", + old.getProperty(EXTRA_SLASHES_FOR_RFIO_TURL_KEY).trim()); + } + if (old.containsKey(EXTRA_SLASHES_FOR_ROOT_TURL_KEY)) { + properties.setProperty("extraslashes.root", + old.getProperty(EXTRA_SLASHES_FOR_ROOT_TURL_KEY).trim()); + } + if (old.containsKey(EXTRA_SLASHES_FOR_GSIFTP_TURL_KEY)) { + properties.setProperty("extraslashes.gsiftp", + old.getProperty(EXTRA_SLASHES_FOR_GSIFTP_TURL_KEY).trim()); + } + + // skip ptg acl + if (old.containsKey(PTG_SKIP_ACL_SETUP)) { + properties.setProperty("skip_ptg_acl_setup", old.getProperty(PTG_SKIP_ACL_SETUP)); + } + + // hearthbeat + if (old.containsKey(BOOK_KEEPING_ENABLED_KEY)) { + properties.setProperty("hearthbeat.bookkeeping_enabled", + old.getProperty(BOOK_KEEPING_ENABLED_KEY)); + } + if (old.containsKey(PERFORMANCE_MEASURING_KEY)) { + properties.setProperty("hearthbeat.performance_measuring_enabled", + old.getProperty(PERFORMANCE_MEASURING_KEY)); + } + if 
(old.containsKey(HEARTHBEAT_PERIOD_KEY)) { + properties.setProperty("hearthbeat.period", old.getProperty(HEARTHBEAT_PERIOD_KEY)); + } + if (old.containsKey(PERFORMANCE_LOGBOOK_TIME_INTERVAL_KEY)) { + properties.setProperty("hearthbeat.performance_logbook_time_interval", + old.getProperty(PERFORMANCE_LOGBOOK_TIME_INTERVAL_KEY)); + } + if (old.containsKey(PERFORMANCE_GLANCE_TIME_INTERVAL_KEY)) { + properties.setProperty("hearthbeat.performance_glance_time_interval", + old.getProperty(PERFORMANCE_GLANCE_TIME_INTERVAL_KEY)); + } + + // requests picker + if (old.containsKey(PICKING_INITIAL_DELAY_KEY)) { + properties.setProperty("requests_picker_agent.delay", + old.getProperty(PICKING_INITIAL_DELAY_KEY)); + } + if (old.containsKey(PICKING_TIME_INTERVAL_KEY)) { + properties.setProperty("requests_picker_agent.interval", + old.getProperty(PICKING_TIME_INTERVAL_KEY)); + } + if (old.containsKey(PICKING_MAX_BATCH_SIZE_KEY)) { + properties.setProperty("requests_picker_agent.max_fetched_size", + old.getProperty(PICKING_MAX_BATCH_SIZE_KEY)); + } + + // requests scheduler + if (old.containsKey(CORE_POOL_SIZE_KEY)) { + properties.setProperty("requests_scheduler.core_pool_size", + old.getProperty(CORE_POOL_SIZE_KEY)); + } + if (old.containsKey(MAX_POOL_SIZE_KEY)) { + properties.setProperty("requests_scheduler.max_pool_size", + old.getProperty(MAX_POOL_SIZE_KEY)); + } + if (old.containsKey(QUEUE_SIZE_KEY)) { + properties.setProperty("requests_scheduler.queue_size", old.getProperty(QUEUE_SIZE_KEY)); + } + + // ptp requests scheduler + if (old.containsKey(PTP_CORE_POOL_SIZE_KEY)) { + properties.setProperty("ptp_scheduler.core_pool_size", + old.getProperty(PTP_CORE_POOL_SIZE_KEY)); + } + if (old.containsKey(PTP_MAX_POOL_SIZE_KEY)) { + properties.setProperty("ptp_scheduler.max_pool_size", old.getProperty(PTP_MAX_POOL_SIZE_KEY)); + } + if (old.containsKey(PTP_QUEUE_SIZE_KEY)) { + properties.setProperty("ptp_scheduler.queue_size", old.getProperty(PTP_QUEUE_SIZE_KEY)); + } + + // ptg 
requests scheduler + if (old.containsKey(PTG_CORE_POOL_SIZE_KEY)) { + properties.setProperty("ptg_scheduler.core_pool_size", + old.getProperty(PTG_CORE_POOL_SIZE_KEY)); + } + if (old.containsKey(PTG_MAX_POOL_SIZE_KEY)) { + properties.setProperty("ptg_scheduler.max_pool_size", old.getProperty(PTG_MAX_POOL_SIZE_KEY)); + } + if (old.containsKey(PTG_QUEUE_SIZE_KEY)) { + properties.setProperty("ptg_scheduler.queue_size", old.getProperty(PTG_QUEUE_SIZE_KEY)); + } + + // bol requests scheduler + if (old.containsKey(BOL_CORE_POOL_SIZE_KEY)) { + properties.setProperty("bol_scheduler.core_pool_size", + old.getProperty(BOL_CORE_POOL_SIZE_KEY)); + } + if (old.containsKey(BOL_MAX_POOL_SIZE_KEY)) { + properties.setProperty("bol_scheduler.max_pool_size", old.getProperty(BOL_MAX_POOL_SIZE_KEY)); + } + if (old.containsKey(BOL_QUEUE_SIZE_KEY)) { + properties.setProperty("bol_scheduler.queue_size", old.getProperty(BOL_QUEUE_SIZE_KEY)); + } + + // pin lifetime + if (old.containsKey(PIN_LIFETIME_DEFAULT_KEY)) { + properties.setProperty("pinlifetime.default", old.getProperty(PIN_LIFETIME_DEFAULT_KEY)); + } + if (old.containsKey(PIN_LIFETIME_MAXIMUM_KEY)) { + properties.setProperty("pinlifetime.maximum", old.getProperty(PIN_LIFETIME_MAXIMUM_KEY)); + } + + // storage spaces agent + if (old.containsKey(CLEANING_INITIAL_DELAY_KEY)) { + properties.setProperty("expired_spaces_agent.delay", + old.getProperty(CLEANING_INITIAL_DELAY_KEY)); + } + if (old.containsKey(CLEANING_TIME_INTERVAL_KEY)) { + properties.setProperty("expired_spaces_agent.interval", + old.getProperty(CLEANING_TIME_INTERVAL_KEY)); + } + + // in progress requests agent + if (old.containsKey(TRANSIT_INITIAL_DELAY_KEY)) { + properties.setProperty("inprogress_requests_agent.delay", + old.getProperty(TRANSIT_INITIAL_DELAY_KEY)); + } + if (old.containsKey(TRANSIT_TIME_INTERVAL_KEY)) { + properties.setProperty("inprogress_requests_agent.interval", + old.getProperty(TRANSIT_TIME_INTERVAL_KEY)); + } + if 
(old.containsKey(EXPIRED_INPROGRESS_PTP_TIME_KEY)) { + properties.setProperty("inprogress_requests_agent.ptp_expiration_time", + old.getProperty(EXPIRED_INPROGRESS_PTP_TIME_KEY)); + } + + // completed requests agent + if (old.containsKey(EXPIRED_REQUEST_PURGING_KEY)) { + properties.setProperty("completed_requests_agent.enabled", + old.getProperty(EXPIRED_REQUEST_PURGING_KEY)); + } + if (old.containsKey(EXPIRED_REQUEST_TIME_KEY)) { + properties.setProperty("completed_requests_agent.purge_age", + old.getProperty(EXPIRED_REQUEST_TIME_KEY)); + } + if (old.containsKey(PURGE_BATCH_SIZE_KEY)) { + properties.setProperty("completed_requests_agent.purge_size", + old.getProperty(PURGE_BATCH_SIZE_KEY)); + } + if (old.containsKey(REQUEST_PURGER_DELAY_KEY)) { + properties.setProperty("completed_requests_agent.delay", + old.getProperty(REQUEST_PURGER_DELAY_KEY)); + } + if (old.containsKey(REQUEST_PURGER_PERIOD_KEY)) { + properties.setProperty("completed_requests_agent.interval", + old.getProperty(REQUEST_PURGER_PERIOD_KEY)); + } + + // others + if (old.containsKey(HTTP_TURL_PREFIX)) { + properties.setProperty("http_turl_prefix", old.getProperty(HTTP_TURL_PREFIX)); + } + if (old.containsKey(SERVER_POOL_STATUS_CHECK_TIMEOUT_KEY)) { + properties.setProperty("server_pool_status_check_timeout", + old.getProperty(SERVER_POOL_STATUS_CHECK_TIMEOUT_KEY)); + } + if (old.containsKey(MAX_LOOP_KEY)) { + properties.setProperty("abort_maxloop", old.getProperty(MAX_LOOP_KEY)); + } + if (old.containsKey(GPFS_QUOTA_REFRESH_PERIOD_KEY)) { + properties.setProperty("info_quota_refresh_period", + old.getProperty(GPFS_QUOTA_REFRESH_PERIOD_KEY)); + } + if (old.containsKey(PING_VALUES_PROPERTIES_FILENAME_KEY)) { + properties.setProperty("ping_properties_filename", + old.getProperty(PING_VALUES_PROPERTIES_FILENAME_KEY).trim()); + } + + String description = format("Configuration generated from '%s'", source); + saveToFile(target, properties, description); + } + + private static void saveToFile(File target, 
Properties properties, String description) + throws IOException { + + Enumeration keys = properties.keys(); + FileWriter fw = new FileWriter(target); + fw.write(format("# %s%n", description)); + fw.write(format("# Created at %s%n", ISO_DATE_TIME.format(LocalDateTime.now()))); + while (keys.hasMoreElements()) { + String key = String.valueOf(keys.nextElement()); + String value = String.valueOf(properties.get(key)); + fw.write(String.format("%s: %s%n", key, value)); + } + fw.close(); + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v1/StormProperties.java b/src/main/java/it/grid/storm/config/model/v1/StormProperties.java new file mode 100644 index 000000000..e48b2be7f --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v1/StormProperties.java @@ -0,0 +1,104 @@ +package it.grid.storm.config.model.v1; + +public class StormProperties { + + /* Configuration file properties */ + public static final String MANAGED_SURLS_KEY = "storm.service.SURL.endpoint"; + public static final String SERVICE_DEFAULT_PORTS = "storm.service.SURL.default-ports"; + public static final String SERVICE_HOSTNAME_KEY = "storm.service.FE-public.hostname"; + public static final String SERVICE_PORT_KEY = "storm.service.port"; + public static final String DB_URL_HOSTNAME_KEY = "storm.service.request-db.host"; + public static final String DB_URL_PROPERTIES_KEY = "storm.service.request-db.properties"; + public static final String DB_USER_NAME_KEY = "storm.service.request-db.username"; + public static final String DB_PASSWORD_KEY = "storm.service.request-db.passwd"; + public static final String BE_PERSISTENCE_POOL_DB_MAX_ACTIVE_KEY = + "persistence.internal-db.connection-pool.maxActive"; + public static final String BE_PERSISTENCE_POOL_DB_MAX_WAIT_KEY = + "persistence.internal-db.connection-pool.maxWait"; + public static final String XMLRPC_SERVER_PORT_KEY = "synchcall.xmlrpc.unsecureServerPort"; + public static final String XMLRPC_MAX_THREAD_KEY = "synchcall.xmlrpc.maxthread"; + 
public static final String XMLRPC_MAX_QUEUE_SIZE_KEY = "synchcall.xmlrpc.max_queue_size"; + public static final String REST_SERVICES_PORT_KEY = "storm.rest.services.port"; + public static final String REST_SERVICES_MAX_THREAD = "storm.rest.services.maxthread"; + public static final String REST_SERVICES_MAX_QUEUE_SIZE = "storm.rest.services.max_queue_size"; + public static final String XMLRPC_SECURITY_ENABLED_KEY = "synchcall.xmlrpc.security.enabled"; + public static final String XMLRPC_SECURITY_TOKEN_KEY = "synchcall.xmlrpc.security.token"; + + public static final String DISKUSAGE_SERVICE_ENABLED = "storm.service.du.enabled"; + public static final String DISKUSAGE_SERVICE_INITIAL_DELAY = "storm.service.du.delaySecs"; + public static final String DISKUSAGE_SERVICE_TASKS_INTERVAL = "storm.service.du.periodSecs"; + public static final String DISKUSAGE_SERVICE_TASKS_PARALLEL = "storm.service.du.parallelTasks"; + + public static final String SANITY_CHECK_ENABLED_KEY = "sanity-check.enabled"; + + public static final String LS_MAX_NUMBER_OF_ENTRY_KEY = "synchcall.directoryManager.maxLsEntry"; + public static final String LS_ALL_LEVEL_RECURSIVE_KEY = + "synchcall.directoryManager.default.AllLevelRecursive"; + public static final String LS_NUM_OF_LEVELS_KEY = "synchcall.directoryManager.default.Levels"; + public static final String LS_OFFSET_KEY = "synchcall.directoryManager.default.Offset"; + + public static final String AUTOMATIC_DIRECTORY_CREATION_KEY = "directory.automatic-creation"; + public static final String ENABLE_WRITE_PERM_ON_DIRECTORY_KEY = "directory.writeperm"; + + public static final String FILE_DEFAULT_SIZE_KEY = "fileSize.default"; + public static final String FILE_LIFETIME_DEFAULT_KEY = "fileLifetime.default"; + public static final String DEFAULT_OVERWRITE_MODE_KEY = "default.overwrite"; + public static final String DEFAULT_FILE_STORAGE_TYPE_KEY = "default.storagetype"; + + public static final String EXTRA_SLASHES_FOR_FILE_TURL_KEY = "extraslashes.file"; + 
public static final String EXTRA_SLASHES_FOR_RFIO_TURL_KEY = "extraslashes.rfio"; + public static final String EXTRA_SLASHES_FOR_GSIFTP_TURL_KEY = "extraslashes.gsiftp"; + public static final String EXTRA_SLASHES_FOR_ROOT_TURL_KEY = "extraslashes.root"; + + public static final String PTG_SKIP_ACL_SETUP = "ptg.skip-acl-setup"; + + public static final String HEARTHBEAT_PERIOD_KEY = "health.electrocardiogram.period"; + public static final String PERFORMANCE_GLANCE_TIME_INTERVAL_KEY = + "health.performance.glance.timeInterval"; + public static final String PERFORMANCE_LOGBOOK_TIME_INTERVAL_KEY = + "health.performance.logbook.timeInterval"; + public static final String PERFORMANCE_MEASURING_KEY = "health.performance.mesauring.enabled"; + public static final String BOOK_KEEPING_ENABLED_KEY = "health.bookkeeping.enabled"; + + public static final String PICKING_INITIAL_DELAY_KEY = "asynch.PickingInitialDelay"; + public static final String PICKING_TIME_INTERVAL_KEY = "asynch.PickingTimeInterval"; + public static final String PICKING_MAX_BATCH_SIZE_KEY = "asynch.PickingMaxBatchSize"; + + public static final String CORE_POOL_SIZE_KEY = "scheduler.crusher.workerCorePoolSize"; + public static final String MAX_POOL_SIZE_KEY = "scheduler.crusher.workerMaxPoolSize"; + public static final String QUEUE_SIZE_KEY = "scheduler.crusher.queueSize"; + + public static final String PTP_CORE_POOL_SIZE_KEY = "scheduler.chunksched.ptp.workerCorePoolSize"; + public static final String PTP_MAX_POOL_SIZE_KEY = "scheduler.chunksched.ptp.workerMaxPoolSize"; + public static final String PTP_QUEUE_SIZE_KEY = "scheduler.chunksched.ptp.queueSize"; + public static final String PTG_CORE_POOL_SIZE_KEY = "scheduler.chunksched.ptg.workerCorePoolSize"; + public static final String PTG_MAX_POOL_SIZE_KEY = "scheduler.chunksched.ptg.workerMaxPoolSize"; + public static final String PTG_QUEUE_SIZE_KEY = "scheduler.chunksched.ptg.queueSize"; + public static final String BOL_CORE_POOL_SIZE_KEY = 
"scheduler.chunksched.bol.workerCorePoolSize"; + public static final String BOL_MAX_POOL_SIZE_KEY = "scheduler.chunksched.bol.workerMaxPoolSize"; + public static final String BOL_QUEUE_SIZE_KEY = "scheduler.chunksched.bol.queueSize"; + + public static final String PIN_LIFETIME_DEFAULT_KEY = "pinLifetime.default"; + public static final String PIN_LIFETIME_MAXIMUM_KEY = "pinLifetime.maximum"; + + public static final String CLEANING_INITIAL_DELAY_KEY = "gc.pinnedfiles.cleaning.delay"; + public static final String CLEANING_TIME_INTERVAL_KEY = "gc.pinnedfiles.cleaning.interval"; + + public static final String TRANSIT_INITIAL_DELAY_KEY = "transit.delay"; + public static final String TRANSIT_TIME_INTERVAL_KEY = "transit.interval"; + public static final String EXPIRED_INPROGRESS_PTP_TIME_KEY = "expired.inprogress.time"; + + public static final String PURGE_BATCH_SIZE_KEY = "purge.size"; + public static final String EXPIRED_REQUEST_TIME_KEY = "expired.request.time"; + public static final String REQUEST_PURGER_DELAY_KEY = "purge.delay"; + public static final String REQUEST_PURGER_PERIOD_KEY = "purge.interval"; + public static final String EXPIRED_REQUEST_PURGING_KEY = "purging"; + + public static final String PING_VALUES_PROPERTIES_FILENAME_KEY = "ping-properties.filename"; + public static final String MAX_LOOP_KEY = "abort.maxloop"; + public static final String GPFS_QUOTA_REFRESH_PERIOD_KEY = "info.quota.refresh.period"; + public static final String SERVER_POOL_STATUS_CHECK_TIMEOUT_KEY = + "server-pool.status-check.timeout"; + public static final String HTTP_TURL_PREFIX = "http.turl_prefix"; + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/AdvancedDirectorySettings.java b/src/main/java/it/grid/storm/config/model/v2/AdvancedDirectorySettings.java new file mode 100644 index 000000000..1cdd7c672 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/AdvancedDirectorySettings.java @@ -0,0 +1,47 @@ +package it.grid.storm.config.model.v2; + +import static 
it.grid.storm.config.ConfigurationDefaults.AUTOMATIC_DIRECTORY_CREATION; +import static it.grid.storm.config.ConfigurationDefaults.ENABLE_WRITE_PERM_ON_DIRECTORY; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class AdvancedDirectorySettings { + + private boolean enableAutomaticCreation; + private boolean enableWritepermOnCreation; + + public AdvancedDirectorySettings() { + enableAutomaticCreation = AUTOMATIC_DIRECTORY_CREATION; + enableWritepermOnCreation = ENABLE_WRITE_PERM_ON_DIRECTORY; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("AdvancedDirectorySettings [enableAutomaticCreation="); + builder.append(enableAutomaticCreation); + builder.append(", enableWritepermOnCreation="); + builder.append(enableWritepermOnCreation); + builder.append("]"); + return builder.toString(); + } + + public boolean isEnableAutomaticCreation() { + return enableAutomaticCreation; + } + + public void setEnableAutomaticCreation(boolean enableAutomaticCreation) { + this.enableAutomaticCreation = enableAutomaticCreation; + } + + public boolean isEnableWritepermOnCreation() { + return enableWritepermOnCreation; + } + + public void setEnableWritepermOnCreation(boolean enableWritepermOnCreation) { + this.enableWritepermOnCreation = enableWritepermOnCreation; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/AdvancedFileSettings.java b/src/main/java/it/grid/storm/config/model/v2/AdvancedFileSettings.java new file mode 100644 index 000000000..6adff01da --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/AdvancedFileSettings.java @@ -0,0 +1,73 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.DEFAULT_FILE_STORAGE_TYPE; +import static it.grid.storm.config.ConfigurationDefaults.DEFAULT_OVERWRITE_MODE; 
+import static it.grid.storm.config.ConfigurationDefaults.FILE_DEFAULT_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.FILE_LIFETIME_DEFAULT; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class AdvancedFileSettings { + + private long defaultSize; + private long defaultLifetime; + private String defaultOverwrite; + private String defaultStoragetype; + + public AdvancedFileSettings() { + defaultSize = FILE_DEFAULT_SIZE; + defaultLifetime = FILE_LIFETIME_DEFAULT; + defaultOverwrite = DEFAULT_OVERWRITE_MODE; + defaultStoragetype = DEFAULT_FILE_STORAGE_TYPE; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("AdvancedFileSettings [defaultSize="); + builder.append(defaultSize); + builder.append(", defaultLifetime="); + builder.append(defaultLifetime); + builder.append(", defaultOverwrite="); + builder.append(defaultOverwrite); + builder.append(", defaultStoragetype="); + builder.append(defaultStoragetype); + builder.append("]"); + return builder.toString(); + } + + public long getDefaultSize() { + return defaultSize; + } + + public void setDefaultSize(long defaultSize) { + this.defaultSize = defaultSize; + } + + public long getDefaultLifetime() { + return defaultLifetime; + } + + public void setDefaultLifetime(long defaultLifetime) { + this.defaultLifetime = defaultLifetime; + } + + public String getDefaultOverwrite() { + return defaultOverwrite; + } + + public void setDefaultOverwrite(String defaultOverwrite) { + this.defaultOverwrite = defaultOverwrite; + } + + public String getDefaultStoragetype() { + return defaultStoragetype; + } + + public void setDefaultStoragetype(String defaultStoragetype) { + this.defaultStoragetype = defaultStoragetype; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/BolScheduler.java 
b/src/main/java/it/grid/storm/config/model/v2/BolScheduler.java new file mode 100644 index 000000000..e9e48aea4 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/BolScheduler.java @@ -0,0 +1,60 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.BOL_SCHEDULER_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.BOL_SCHEDULER_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.BOL_SCHEDULER_QUEUE_SIZE; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class BolScheduler { + + private int corePoolSize; + private int maxPoolSize; + private int queueSize; + + public BolScheduler() { + corePoolSize = BOL_SCHEDULER_CORE_POOL_SIZE; + maxPoolSize = BOL_SCHEDULER_MAX_POOL_SIZE; + queueSize = BOL_SCHEDULER_QUEUE_SIZE; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("BolScheduler [corePoolSize="); + builder.append(corePoolSize); + builder.append(", maxPoolSize="); + builder.append(maxPoolSize); + builder.append(", queueSize="); + builder.append(queueSize); + builder.append("]"); + return builder.toString(); + } + + public int getCorePoolSize() { + return corePoolSize; + } + + public void setCorePoolSize(int corePoolSize) { + this.corePoolSize = corePoolSize; + } + + public int getMaxPoolSize() { + return maxPoolSize; + } + + public void setMaxPoolSize(int maxPoolSize) { + this.maxPoolSize = maxPoolSize; + } + + public int getQueueSize() { + return queueSize; + } + + public void setQueueSize(int queueSize) { + this.queueSize = queueSize; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/CompletedRequestsAgent.java b/src/main/java/it/grid/storm/config/model/v2/CompletedRequestsAgent.java new file mode 100644 index 000000000..6def87f33 --- /dev/null 
+++ b/src/main/java/it/grid/storm/config/model/v2/CompletedRequestsAgent.java @@ -0,0 +1,86 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_PURGE_AGE; +import static it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_PURGE_SIZE; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class CompletedRequestsAgent { + + private boolean enabled; + private int delay; + private int interval; + private long purgeAge; + private int purgeSize; + + public CompletedRequestsAgent() { + enabled = COMPLETED_REQUESTS_AGENT_ENABLED; + delay = COMPLETED_REQUESTS_AGENT_DELAY; + interval = COMPLETED_REQUESTS_AGENT_INTERVAL; + purgeAge = COMPLETED_REQUESTS_AGENT_PURGE_AGE; + purgeSize = COMPLETED_REQUESTS_AGENT_PURGE_SIZE; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("CompletedRequestsAgent [enabled="); + builder.append(enabled); + builder.append(", delay="); + builder.append(delay); + builder.append(", interval="); + builder.append(interval); + builder.append(", purgeAge="); + builder.append(purgeAge); + builder.append(", purgeSize="); + builder.append(purgeSize); + builder.append("]"); + return builder.toString(); + } + + public boolean isEnabled() { + return enabled; + } + + public void setEnabled(boolean enabled) { + this.enabled = enabled; + } + + public int getDelay() { + return delay; + } + + public void setDelay(int delay) { + this.delay = delay; + } + + public int getInterval() { + return interval; + } + + 
public void setInterval(int interval) { + this.interval = interval; + } + + public long getPurgeAge() { + return purgeAge; + } + + public void setPurgeAge(long purgeAge) { + this.purgeAge = purgeAge; + } + + public int getPurgeSize() { + return purgeSize; + } + + public void setPurgeSize(int purgeSize) { + this.purgeSize = purgeSize; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/DatabaseConnection.java b/src/main/java/it/grid/storm/config/model/v2/DatabaseConnection.java new file mode 100644 index 000000000..31d46a6d8 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/DatabaseConnection.java @@ -0,0 +1,101 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.DB_PASSWORD; +import static it.grid.storm.config.ConfigurationDefaults.DB_PORT; +import static it.grid.storm.config.ConfigurationDefaults.DB_PROPERTIES; +import static it.grid.storm.config.ConfigurationDefaults.DB_USERNAME; + +import java.net.InetAddress; +import java.net.UnknownHostException; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class DatabaseConnection { + + private String username; + private String password; + private String hostname; + private int port; + private String properties; + + private DatabasePoolProperties pool; + + public DatabaseConnection() throws UnknownHostException { + username = DB_USERNAME; + password = DB_PASSWORD; + hostname = InetAddress.getLocalHost().getHostName(); + port = DB_PORT; + properties = DB_PROPERTIES; + pool = new DatabasePoolProperties(); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("DatabaseConnection [username="); + builder.append(username); + builder.append(", password="); + builder.append(password); + builder.append(", hostname="); + builder.append(hostname); + 
builder.append(", port="); + builder.append(port); + builder.append(", properties="); + builder.append(properties); + builder.append(", pool="); + builder.append(pool); + builder.append("]"); + return builder.toString(); + } + + public String getUsername() { + return username; + } + + public void setUsername(String username) { + this.username = username; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public String getHostname() { + return hostname; + } + + public void setHostname(String hostname) { + this.hostname = hostname; + } + + public int getPort() { + return port; + } + + public void setPort(int port) { + this.port = port; + } + + public String getProperties() { + return properties; + } + + public void setProperties(String properties) { + this.properties = properties; + } + + public DatabasePoolProperties getPool() { + return pool; + } + + public void setPool(DatabasePoolProperties pool) { + this.pool = pool; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/DatabasePoolProperties.java b/src/main/java/it/grid/storm/config/model/v2/DatabasePoolProperties.java new file mode 100644 index 000000000..2fea815cd --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/DatabasePoolProperties.java @@ -0,0 +1,86 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_MAX_WAIT_MILLIS; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_MIN_IDLE; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_TEST_ON_BORROW; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_TEST_WHILE_IDLE; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class 
DatabasePoolProperties { + + private int size; + private int minIdle; + private int maxWaitMillis; + private boolean testOnBorrow; + private boolean testWhileIdle; + + public DatabasePoolProperties() { + size = DB_POOL_SIZE; + minIdle = DB_POOL_MIN_IDLE; + maxWaitMillis = DB_POOL_MAX_WAIT_MILLIS; + testOnBorrow = DB_POOL_TEST_ON_BORROW; + testWhileIdle = DB_POOL_TEST_WHILE_IDLE; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("DatabasePoolProperties [size="); + builder.append(size); + builder.append(", minIdle="); + builder.append(minIdle); + builder.append(", maxWaitMillis="); + builder.append(maxWaitMillis); + builder.append(", testOnBorrow="); + builder.append(testOnBorrow); + builder.append(", testWhileIdle="); + builder.append(testWhileIdle); + builder.append("]"); + return builder.toString(); + } + + public int getSize() { + return size; + } + + public void setSize(int size) { + this.size = size; + } + + public int getMinIdle() { + return minIdle; + } + + public void setMinIdle(int minIdle) { + this.minIdle = minIdle; + } + + public int getMaxWaitMillis() { + return maxWaitMillis; + } + + public void setMaxWaitMillis(int maxWaitMillis) { + this.maxWaitMillis = maxWaitMillis; + } + + public boolean isTestOnBorrow() { + return testOnBorrow; + } + + public void setTestOnBorrow(boolean testOnBorrow) { + this.testOnBorrow = testOnBorrow; + } + + public boolean isTestWhileIdle() { + return testWhileIdle; + } + + public void setTestWhileIdle(boolean testWhileIdle) { + this.testWhileIdle = testWhileIdle; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/DiskUsageService.java b/src/main/java/it/grid/storm/config/model/v2/DiskUsageService.java new file mode 100644 index 000000000..f2dbd7c32 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/DiskUsageService.java @@ -0,0 +1,73 @@ +package it.grid.storm.config.model.v2; + +import static 
it.grid.storm.config.ConfigurationDefaults.DISKUSAGE_SERVICE_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.DISKUSAGE_SERVICE_INITIAL_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.DISKUSAGE_SERVICE_PARALLEL_TASKS_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.DISKUSAGE_SERVICE_TASKS_INTERVAL; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class DiskUsageService { + + private boolean enabled; + private boolean parallelTasksEnabled; + private int initialDelay; + private long tasksInterval; + + public DiskUsageService() { + enabled = DISKUSAGE_SERVICE_ENABLED; + parallelTasksEnabled = DISKUSAGE_SERVICE_PARALLEL_TASKS_ENABLED; + initialDelay = DISKUSAGE_SERVICE_INITIAL_DELAY; + tasksInterval = DISKUSAGE_SERVICE_TASKS_INTERVAL; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("DiskUsageService [enabled="); + builder.append(enabled); + builder.append(", parallelTasksEnabled="); + builder.append(parallelTasksEnabled); + builder.append(", initialDelay="); + builder.append(initialDelay); + builder.append(", tasksInterval="); + builder.append(tasksInterval); + builder.append("]"); + return builder.toString(); + } + + public boolean isEnabled() { + return enabled; + } + + public void setEnabled(boolean enabled) { + this.enabled = enabled; + } + + public boolean isParallelTasksEnabled() { + return parallelTasksEnabled; + } + + public void setParallelTasksEnabled(boolean parallelTasksEnabled) { + this.parallelTasksEnabled = parallelTasksEnabled; + } + + public int getInitialDelay() { + return initialDelay; + } + + public void setInitialDelay(int initialDelay) { + this.initialDelay = initialDelay; + } + + public long getTasksInterval() { + return tasksInterval; + } + + public void setTasksInterval(long 
tasksInterval) { + this.tasksInterval = tasksInterval; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/ExpiredSpacesAgent.java b/src/main/java/it/grid/storm/config/model/v2/ExpiredSpacesAgent.java new file mode 100644 index 000000000..f641eae21 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/ExpiredSpacesAgent.java @@ -0,0 +1,47 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.EXPIRED_SPACES_AGENT_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.EXPIRED_SPACES_AGENT_INTERVAL; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class ExpiredSpacesAgent { + + private int delay; + private int interval; + + public ExpiredSpacesAgent() { + delay = EXPIRED_SPACES_AGENT_DELAY; + interval = EXPIRED_SPACES_AGENT_INTERVAL; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("ExpiredSpacesAgent [delay="); + builder.append(delay); + builder.append(", interval="); + builder.append(interval); + builder.append("]"); + return builder.toString(); + } + + public int getDelay() { + return delay; + } + + public void setDelay(int delay) { + this.delay = delay; + } + + public int getInterval() { + return interval; + } + + public void setInterval(int interval) { + this.interval = interval; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/ExtraslashesSettings.java b/src/main/java/it/grid/storm/config/model/v2/ExtraslashesSettings.java new file mode 100644 index 000000000..a5c22933a --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/ExtraslashesSettings.java @@ -0,0 +1,73 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_FILE_TURL; +import static 
it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_GSIFTP_TURL; +import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_RFIO_TURL; +import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_ROOT_TURL; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class ExtraslashesSettings { + + private String file; + private String rfio; + private String root; + private String gsiftp; + + public ExtraslashesSettings() { + file = EXTRA_SLASHES_FOR_FILE_TURL; + rfio = EXTRA_SLASHES_FOR_RFIO_TURL; + root = EXTRA_SLASHES_FOR_ROOT_TURL; + gsiftp = EXTRA_SLASHES_FOR_GSIFTP_TURL; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("ExtraslashesSettings [file="); + builder.append(file); + builder.append(", rfio="); + builder.append(rfio); + builder.append(", root="); + builder.append(root); + builder.append(", gsiftp="); + builder.append(gsiftp); + builder.append("]"); + return builder.toString(); + } + + public String getFile() { + return file; + } + + public void setFile(String file) { + this.file = file; + } + + public String getRfio() { + return rfio; + } + + public void setRfio(String rfio) { + this.rfio = rfio; + } + + public String getRoot() { + return root; + } + + public void setRoot(String root) { + this.root = root; + } + + public String getGsiftp() { + return gsiftp; + } + + public void setGsiftp(String gsiftp) { + this.gsiftp = gsiftp; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/HearthbeatSettings.java b/src/main/java/it/grid/storm/config/model/v2/HearthbeatSettings.java new file mode 100644 index 000000000..363646709 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/HearthbeatSettings.java @@ -0,0 +1,86 @@ +package it.grid.storm.config.model.v2; + +import static 
it.grid.storm.config.ConfigurationDefaults.BOOK_KEEPING_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.HEARTHBEAT_PERIOD; +import static it.grid.storm.config.ConfigurationDefaults.PERFORMANCE_GLANCE_TIME_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.PERFORMANCE_LOGBOOK_TIME_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.PERFORMANCE_MEASURING; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class HearthbeatSettings { + + private boolean bookkeepingEnabled; + private boolean performanceMeasuringEnabled; + private int period; + private int performanceLogbookTimeInterval; + private int performanceGlanceTimeInterval; + + public HearthbeatSettings() { + bookkeepingEnabled = BOOK_KEEPING_ENABLED; + performanceMeasuringEnabled = PERFORMANCE_MEASURING; + period = HEARTHBEAT_PERIOD; + performanceLogbookTimeInterval = PERFORMANCE_LOGBOOK_TIME_INTERVAL; + performanceGlanceTimeInterval = PERFORMANCE_GLANCE_TIME_INTERVAL; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("HearthbeatSettings [bookkeepingEnabled="); + builder.append(bookkeepingEnabled); + builder.append(", performanceMeasuringEnabled="); + builder.append(performanceMeasuringEnabled); + builder.append(", period="); + builder.append(period); + builder.append(", performanceLogbookTimeInterval="); + builder.append(performanceLogbookTimeInterval); + builder.append(", performanceGlanceTimeInterval="); + builder.append(performanceGlanceTimeInterval); + builder.append("]"); + return builder.toString(); + } + + public boolean isBookkeepingEnabled() { + return bookkeepingEnabled; + } + + public void setBookkeepingEnabled(boolean bookkeepingEnabled) { + this.bookkeepingEnabled = bookkeepingEnabled; + } + + public boolean isPerformanceMeasuringEnabled() { + 
return performanceMeasuringEnabled; + } + + public void setPerformanceMeasuringEnabled(boolean performanceMeasuringEnabled) { + this.performanceMeasuringEnabled = performanceMeasuringEnabled; + } + + public int getPeriod() { + return period; + } + + public void setPeriod(int period) { + this.period = period; + } + + public int getPerformanceLogbookTimeInterval() { + return performanceLogbookTimeInterval; + } + + public void setPerformanceLogbookTimeInterval(int performanceLogbookTimeInterval) { + this.performanceLogbookTimeInterval = performanceLogbookTimeInterval; + } + + public int getPerformanceGlanceTimeInterval() { + return performanceGlanceTimeInterval; + } + + public void setPerformanceGlanceTimeInterval(int performanceGlanceTimeInterval) { + this.performanceGlanceTimeInterval = performanceGlanceTimeInterval; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/InProgressRequestsAgent.java b/src/main/java/it/grid/storm/config/model/v2/InProgressRequestsAgent.java new file mode 100644 index 000000000..ce042b250 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/InProgressRequestsAgent.java @@ -0,0 +1,60 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.INPROGRESS_REQUESTS_AGENT_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.INPROGRESS_REQUESTS_AGENT_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.INPROGRESS_REQUESTS_AGENT_PTP_EXPIRATION_TIME; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class InProgressRequestsAgent { + + private int delay; + private int interval; + private long ptpExpirationTime; + + public InProgressRequestsAgent() { + delay = INPROGRESS_REQUESTS_AGENT_DELAY; + interval = INPROGRESS_REQUESTS_AGENT_INTERVAL; + ptpExpirationTime = INPROGRESS_REQUESTS_AGENT_PTP_EXPIRATION_TIME; 
+ } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("InProgressRequestsAgent [delay="); + builder.append(delay); + builder.append(", interval="); + builder.append(interval); + builder.append(", ptpExpirationTime="); + builder.append(ptpExpirationTime); + builder.append("]"); + return builder.toString(); + } + + public int getDelay() { + return delay; + } + + public void setDelay(int delay) { + this.delay = delay; + } + + public int getInterval() { + return interval; + } + + public void setInterval(int interval) { + this.interval = interval; + } + + public long getPtpExpirationTime() { + return ptpExpirationTime; + } + + public void setPtpExpirationTime(long ptpExpirationTime) { + this.ptpExpirationTime = ptpExpirationTime; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/OverwriteMode.java b/src/main/java/it/grid/storm/config/model/v2/OverwriteMode.java new file mode 100644 index 000000000..190dee268 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/OverwriteMode.java @@ -0,0 +1,11 @@ +package it.grid.storm.config.model.v2; + +import com.fasterxml.jackson.annotation.JsonFormat; + +@JsonFormat(shape = JsonFormat.Shape.STRING) +public enum OverwriteMode { + + N, + A, + D; +} diff --git a/src/main/java/it/grid/storm/config/model/v2/PinlifetimeSettings.java b/src/main/java/it/grid/storm/config/model/v2/PinlifetimeSettings.java new file mode 100644 index 000000000..588ac76da --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/PinlifetimeSettings.java @@ -0,0 +1,49 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.PIN_LIFETIME_DEFAULT; +import static it.grid.storm.config.ConfigurationDefaults.PIN_LIFETIME_MAXIMUM; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + 
+@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class PinlifetimeSettings { + + @JsonProperty("default") + private long defaultValue; + private long maximum; + + public PinlifetimeSettings() { + defaultValue = PIN_LIFETIME_DEFAULT; + maximum = PIN_LIFETIME_MAXIMUM; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("PinlifetimeSettings [defaultValue="); + builder.append(defaultValue); + builder.append(", maximum="); + builder.append(maximum); + builder.append("]"); + return builder.toString(); + } + + public long getDefaultValue() { + return defaultValue; + } + + public void setDefaultValue(long defaultValue) { + this.defaultValue = defaultValue; + } + + public long getMaximum() { + return maximum; + } + + public void setMaximum(long maximum) { + this.maximum = maximum; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/PtgScheduler.java b/src/main/java/it/grid/storm/config/model/v2/PtgScheduler.java new file mode 100644 index 000000000..8015145f8 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/PtgScheduler.java @@ -0,0 +1,60 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.PTG_SCHEDULER_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTG_SCHEDULER_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTG_SCHEDULER_QUEUE_SIZE; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class PtgScheduler { + + private int corePoolSize; + private int maxPoolSize; + private int queueSize; + + public PtgScheduler() { + corePoolSize = PTG_SCHEDULER_CORE_POOL_SIZE; + maxPoolSize = PTG_SCHEDULER_MAX_POOL_SIZE; + queueSize = PTG_SCHEDULER_QUEUE_SIZE; + } + + @Override + public String toString() { + StringBuilder builder = new 
StringBuilder(); + builder.append("PtgScheduler [corePoolSize="); + builder.append(corePoolSize); + builder.append(", maxPoolSize="); + builder.append(maxPoolSize); + builder.append(", queueSize="); + builder.append(queueSize); + builder.append("]"); + return builder.toString(); + } + + public int getCorePoolSize() { + return corePoolSize; + } + + public void setCorePoolSize(int corePoolSize) { + this.corePoolSize = corePoolSize; + } + + public int getMaxPoolSize() { + return maxPoolSize; + } + + public void setMaxPoolSize(int maxPoolSize) { + this.maxPoolSize = maxPoolSize; + } + + public int getQueueSize() { + return queueSize; + } + + public void setQueueSize(int queueSize) { + this.queueSize = queueSize; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/PtpScheduler.java b/src/main/java/it/grid/storm/config/model/v2/PtpScheduler.java new file mode 100644 index 000000000..8a2738745 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/PtpScheduler.java @@ -0,0 +1,59 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.PTP_SCHEDULER_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTP_SCHEDULER_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTP_SCHEDULER_QUEUE_SIZE; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class PtpScheduler { + + private int corePoolSize; + private int maxPoolSize; + private int queueSize; + + public PtpScheduler() { + corePoolSize = PTP_SCHEDULER_CORE_POOL_SIZE; + maxPoolSize = PTP_SCHEDULER_MAX_POOL_SIZE; + queueSize = PTP_SCHEDULER_QUEUE_SIZE; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("PtpScheduler [corePoolSize="); + builder.append(corePoolSize); + builder.append(", maxPoolSize="); + 
builder.append(maxPoolSize); + builder.append(", queueSize="); + builder.append(queueSize); + builder.append("]"); + return builder.toString(); + } + + public int getCorePoolSize() { + return corePoolSize; + } + + public void setCorePoolSize(int corePoolSize) { + this.corePoolSize = corePoolSize; + } + + public int getMaxPoolSize() { + return maxPoolSize; + } + + public void setMaxPoolSize(int maxPoolSize) { + this.maxPoolSize = maxPoolSize; + } + + public int getQueueSize() { + return queueSize; + } + + public void setQueueSize(int queueSize) { + this.queueSize = queueSize; + } +} diff --git a/src/main/java/it/grid/storm/config/model/v2/QualityLevel.java b/src/main/java/it/grid/storm/config/model/v2/QualityLevel.java new file mode 100644 index 000000000..9c7931dfb --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/QualityLevel.java @@ -0,0 +1,24 @@ +package it.grid.storm.config.model.v2; + +import com.fasterxml.jackson.annotation.JsonFormat; +import com.fasterxml.jackson.annotation.JsonValue; + +@JsonFormat(shape = JsonFormat.Shape.STRING) +public enum QualityLevel { + + DEVELOPMENT("development"), + TESTING("testing"), + PREPRODUCTION("pre-production"), + PRODUCTION("production"); + + private String value; + + private QualityLevel(String value) { + this.value = value.toLowerCase(); + } + + @JsonValue + public String getValue() { + return value; + } +} diff --git a/src/main/java/it/grid/storm/config/model/v2/RequestsPickerAgent.java b/src/main/java/it/grid/storm/config/model/v2/RequestsPickerAgent.java new file mode 100644 index 000000000..26f4dcf8d --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/RequestsPickerAgent.java @@ -0,0 +1,61 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_PICKER_AGENT_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_PICKER_AGENT_INTERVAL; +import static 
it.grid.storm.config.ConfigurationDefaults.REQUESTS_PICKER_AGENT_MAX_FETCHED_SIZE; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class RequestsPickerAgent { + + private int delay; + private int interval; + private int maxFetchedSize; + + public RequestsPickerAgent() { + delay = REQUESTS_PICKER_AGENT_DELAY; + interval = REQUESTS_PICKER_AGENT_INTERVAL; + maxFetchedSize = REQUESTS_PICKER_AGENT_MAX_FETCHED_SIZE; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("RequestsPickerAgent [delay="); + builder.append(delay); + builder.append(", interval="); + builder.append(interval); + builder.append(", maxFetchedSize="); + builder.append(maxFetchedSize); + builder.append("]"); + return builder.toString(); + } + + public int getDelay() { + return delay; + } + + public void setDelay(int delay) { + this.delay = delay; + } + + public int getInterval() { + return interval; + } + + public void setInterval(int interval) { + this.interval = interval; + } + + public int getMaxFetchedSize() { + return maxFetchedSize; + } + + public void setMaxFetchedSize(int maxFetchedSize) { + this.maxFetchedSize = maxFetchedSize; + } + + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/RequestsScheduler.java b/src/main/java/it/grid/storm/config/model/v2/RequestsScheduler.java new file mode 100644 index 000000000..b32f6e9d3 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/RequestsScheduler.java @@ -0,0 +1,47 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_SCHEDULER_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_SCHEDULER_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_SCHEDULER_QUEUE_SIZE; + +import 
com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class RequestsScheduler { + + private int corePoolSize; + private int maxPoolSize; + private int queueSize; + + public RequestsScheduler() { + corePoolSize = REQUESTS_SCHEDULER_CORE_POOL_SIZE; + maxPoolSize = REQUESTS_SCHEDULER_MAX_POOL_SIZE; + queueSize = REQUESTS_SCHEDULER_QUEUE_SIZE; + } + + public int getCorePoolSize() { + return corePoolSize; + } + + public void setCorePoolSize(int corePoolSize) { + this.corePoolSize = corePoolSize; + } + + public int getMaxPoolSize() { + return maxPoolSize; + } + + public void setMaxPoolSize(int maxPoolSize) { + this.maxPoolSize = maxPoolSize; + } + + public int getQueueSize() { + return queueSize; + } + + public void setQueueSize(int queueSize) { + this.queueSize = queueSize; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/RestServer.java b/src/main/java/it/grid/storm/config/model/v2/RestServer.java new file mode 100644 index 000000000..7cb4f08ce --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/RestServer.java @@ -0,0 +1,48 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.REST_SERVICES_MAX_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REST_SERVICES_MAX_THREADS; +import static it.grid.storm.config.ConfigurationDefaults.REST_SERVICES_PORT; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class RestServer { + + private int port; + private int maxThreads; + private int maxQueueSize; + + public RestServer() { + port = REST_SERVICES_PORT; + maxThreads = REST_SERVICES_MAX_THREADS; + maxQueueSize = REST_SERVICES_MAX_QUEUE_SIZE; + } + + public void setMaxThreads(int maxThreads) { + 
this.maxThreads = maxThreads > 0 ? maxThreads : REST_SERVICES_MAX_THREADS; + } + + public void setMaxQueueSize(int maxQueueSize) { + this.maxQueueSize = maxQueueSize > 0 ? maxQueueSize : REST_SERVICES_MAX_QUEUE_SIZE; + } + + public int getPort() { + return port; + } + + public void setPort(int port) { + this.port = port; + } + + public int getMaxThreads() { + return maxThreads; + } + + public int getMaxQueueSize() { + return maxQueueSize; + } + + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/SecuritySettings.java b/src/main/java/it/grid/storm/config/model/v2/SecuritySettings.java new file mode 100644 index 000000000..e9f318000 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/SecuritySettings.java @@ -0,0 +1,36 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.SECURITY_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.SECURITY_TOKEN; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class SecuritySettings { + + private boolean enabled; + private String token; + + public SecuritySettings() { + enabled = SECURITY_ENABLED; + token = SECURITY_TOKEN; + } + + public boolean isEnabled() { + return enabled; + } + + public void setEnabled(boolean enabled) { + this.enabled = enabled; + } + + public String getToken() { + return token; + } + + public void setToken(String token) { + this.token = token; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/Site.java b/src/main/java/it/grid/storm/config/model/v2/Site.java new file mode 100644 index 000000000..1ff232a7e --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/Site.java @@ -0,0 +1,38 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.DEFAULT_QUALITY_LEVEL; +import static 
it.grid.storm.config.ConfigurationDefaults.DEFAULT_SITENAME; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class Site { + + private String name; + private QualityLevel qualityLevel; + + public Site() { + + setName(DEFAULT_SITENAME); + setQualityLevel(DEFAULT_QUALITY_LEVEL); + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public QualityLevel getQualityLevel() { + return qualityLevel; + } + + public void setQualityLevel(QualityLevel qualityLevel) { + this.qualityLevel = qualityLevel; + } + + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/SrmEndpoint.java b/src/main/java/it/grid/storm/config/model/v2/SrmEndpoint.java new file mode 100644 index 000000000..4a32d4e9b --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/SrmEndpoint.java @@ -0,0 +1,67 @@ +package it.grid.storm.config.model.v2; + +import java.util.Objects; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class SrmEndpoint { + + private String host; + private int port; + + @JsonCreator + public SrmEndpoint(@JsonProperty(value = "host", required = true) String host, + @JsonProperty(value = "port", required = false, defaultValue = "8444") int port) { + this.host = host; + this.port = port; + } + + @Override + public int hashCode() { + return Objects.hash(host, port); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + SrmEndpoint other = (SrmEndpoint) obj; + return Objects.equals(host, 
other.host) && port == other.port; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("Endpoint [host="); + builder.append(host); + builder.append(", port="); + builder.append(port); + builder.append("]"); + return builder.toString(); + } + + public String getHost() { + return host; + } + + public void setHost(String host) { + this.host = host; + } + + public int getPort() { + return port; + } + + public void setPort(int port) { + this.port = port; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/StorageType.java b/src/main/java/it/grid/storm/config/model/v2/StorageType.java new file mode 100644 index 000000000..0a17d6ef6 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/StorageType.java @@ -0,0 +1,11 @@ +package it.grid.storm.config.model.v2; + +import com.fasterxml.jackson.annotation.JsonFormat; + +@JsonFormat(shape = JsonFormat.Shape.STRING) +public enum StorageType { + + V, + D, + P; +} diff --git a/src/main/java/it/grid/storm/config/model/v2/StormProperties.java b/src/main/java/it/grid/storm/config/model/v2/StormProperties.java new file mode 100644 index 000000000..908eb591f --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/StormProperties.java @@ -0,0 +1,390 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.DEFAULT_SRM_PORT; +import static it.grid.storm.config.ConfigurationDefaults.GPFS_QUOTA_REFRESH_PERIOD; +import static it.grid.storm.config.ConfigurationDefaults.HTTP_TURL_PREFIX; +import static it.grid.storm.config.ConfigurationDefaults.MAX_LOOP; +import static it.grid.storm.config.ConfigurationDefaults.PING_VALUES_PROPERTIES_FILENAME; +import static it.grid.storm.config.ConfigurationDefaults.PTG_SKIP_ACL_SETUP; +import static it.grid.storm.config.ConfigurationDefaults.SANITY_CHECK_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.SERVER_POOL_STATUS_CHECK_TIMEOUT; + +import 
java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +import jersey.repackaged.com.google.common.collect.Lists; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class StormProperties { + + public static final String VERSION = "2.0"; + public static final String UNRECOGNIZED_VERSION = "unknown"; + + private String version; + private List srmEndpoints; + private DatabaseConnection db; + private RestServer rest; + private XmlRpcServer xmlrpc; + private SecuritySettings security; + private DiskUsageService du; + private InProgressRequestsAgent inprogressRequestsAgent; + private ExpiredSpacesAgent expiredSpacesAgent; + private CompletedRequestsAgent completedRequestsAgent; + private RequestsPickerAgent requestsPickerAgent; + private RequestsScheduler requestsScheduler; + private PtpScheduler ptpScheduler; + private PtgScheduler ptgScheduler; + private BolScheduler bolScheduler; + private boolean sanityChecksEnabled; + private ExtraslashesSettings extraslashes; + private SynchLsSettings synchLs; + private PinlifetimeSettings pinlifetime; + private boolean skipPtgAclSetup; + private AdvancedFileSettings files; + private AdvancedDirectorySettings directories; + private HearthbeatSettings hearthbeat; + private int infoQuotaRefreshPeriod; + private String httpTurlPrefix; + private long serverPoolStatusCheckTimeout; + private int abortMaxloop; + private String pingPropertiesFilename; + + private Site site; + + @JsonCreator + public StormProperties(@JsonProperty(value = "version", required = true) String version) + throws UnknownHostException { + this.version = version; + srmEndpoints = Lists + .newArrayList(new SrmEndpoint(InetAddress.getLocalHost().getHostName(), DEFAULT_SRM_PORT)); + db 
= new DatabaseConnection(); + rest = new RestServer(); + xmlrpc = new XmlRpcServer(); + security = new SecuritySettings(); + du = new DiskUsageService(); + inprogressRequestsAgent = new InProgressRequestsAgent(); + expiredSpacesAgent = new ExpiredSpacesAgent(); + completedRequestsAgent = new CompletedRequestsAgent(); + requestsPickerAgent = new RequestsPickerAgent(); + requestsScheduler = new RequestsScheduler(); + ptpScheduler = new PtpScheduler(); + ptgScheduler = new PtgScheduler(); + bolScheduler = new BolScheduler(); + sanityChecksEnabled = SANITY_CHECK_ENABLED; + extraslashes = new ExtraslashesSettings(); + synchLs = new SynchLsSettings(); + pinlifetime = new PinlifetimeSettings(); + skipPtgAclSetup = PTG_SKIP_ACL_SETUP; + files = new AdvancedFileSettings(); + directories = new AdvancedDirectorySettings(); + hearthbeat = new HearthbeatSettings(); + infoQuotaRefreshPeriod = GPFS_QUOTA_REFRESH_PERIOD; + httpTurlPrefix = HTTP_TURL_PREFIX; + serverPoolStatusCheckTimeout = SERVER_POOL_STATUS_CHECK_TIMEOUT; + abortMaxloop = MAX_LOOP; + pingPropertiesFilename = PING_VALUES_PROPERTIES_FILENAME; + setSite(new Site()); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("StormProperties [version="); + builder.append(version); + builder.append(", srmEndpoints="); + builder.append(srmEndpoints); + builder.append(", db="); + builder.append(db); + builder.append(", rest="); + builder.append(rest); + builder.append(", xmlrpc="); + builder.append(xmlrpc); + builder.append(", security="); + builder.append(security); + builder.append(", du="); + builder.append(du); + builder.append(", inprogressRequestsAgent="); + builder.append(inprogressRequestsAgent); + builder.append(", expiredSpacesAgent="); + builder.append(expiredSpacesAgent); + builder.append(", completedRequestsAgent="); + builder.append(completedRequestsAgent); + builder.append(", requestsPickerAgent="); + builder.append(requestsPickerAgent); + 
builder.append(", requestsScheduler="); + builder.append(requestsScheduler); + builder.append(", ptpScheduler="); + builder.append(ptpScheduler); + builder.append(", ptgScheduler="); + builder.append(ptgScheduler); + builder.append(", bolScheduler="); + builder.append(bolScheduler); + builder.append(", sanityChecksEnabled="); + builder.append(sanityChecksEnabled); + builder.append(", extraslashes="); + builder.append(extraslashes); + builder.append(", synchLs="); + builder.append(synchLs); + builder.append(", pinlifetime="); + builder.append(pinlifetime); + builder.append(", skipPtgAclSetup="); + builder.append(skipPtgAclSetup); + builder.append(", files="); + builder.append(files); + builder.append(", directories="); + builder.append(directories); + builder.append(", hearthbeat="); + builder.append(hearthbeat); + builder.append(", infoQuotaRefreshPeriod="); + builder.append(infoQuotaRefreshPeriod); + builder.append(", httpTurlPrefix="); + builder.append(httpTurlPrefix); + builder.append(", serverPoolStatusCheckTimeout="); + builder.append(serverPoolStatusCheckTimeout); + builder.append(", abortMaxloop="); + builder.append(abortMaxloop); + builder.append(", pingPropertiesFilename="); + builder.append(pingPropertiesFilename); + builder.append("]"); + return builder.toString(); + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } + + public List getSrmEndpoints() { + return srmEndpoints; + } + + public void setSrmEndpoints(List srmEndpoints) { + this.srmEndpoints = srmEndpoints; + } + + public DatabaseConnection getDb() { + return db; + } + + public void setDb(DatabaseConnection db) { + this.db = db; + } + + public RestServer getRest() { + return rest; + } + + public void setRest(RestServer rest) { + this.rest = rest; + } + + public XmlRpcServer getXmlrpc() { + return xmlrpc; + } + + public void setXmlrpc(XmlRpcServer xmlrpc) { + this.xmlrpc = xmlrpc; + } + + public SecuritySettings 
getSecurity() { + return security; + } + + public void setSecurity(SecuritySettings security) { + this.security = security; + } + + public DiskUsageService getDu() { + return du; + } + + public void setDu(DiskUsageService du) { + this.du = du; + } + + public InProgressRequestsAgent getInprogressRequestsAgent() { + return inprogressRequestsAgent; + } + + public void setInprogressRequestsAgent(InProgressRequestsAgent inprogressRequestsAgent) { + this.inprogressRequestsAgent = inprogressRequestsAgent; + } + + public ExpiredSpacesAgent getExpiredSpacesAgent() { + return expiredSpacesAgent; + } + + public void setExpiredSpacesAgent(ExpiredSpacesAgent expiredSpacesAgent) { + this.expiredSpacesAgent = expiredSpacesAgent; + } + + public CompletedRequestsAgent getCompletedRequestsAgent() { + return completedRequestsAgent; + } + + public void setCompletedRequestsAgent(CompletedRequestsAgent completedRequestsAgent) { + this.completedRequestsAgent = completedRequestsAgent; + } + + public RequestsPickerAgent getRequestsPickerAgent() { + return requestsPickerAgent; + } + + public void setRequestsPickerAgent(RequestsPickerAgent requestsPickerAgent) { + this.requestsPickerAgent = requestsPickerAgent; + } + + public RequestsScheduler getRequestsScheduler() { + return requestsScheduler; + } + + public void setRequestsScheduler(RequestsScheduler requestsScheduler) { + this.requestsScheduler = requestsScheduler; + } + + public PtpScheduler getPtpScheduler() { + return ptpScheduler; + } + + public void setPtpScheduler(PtpScheduler ptpScheduler) { + this.ptpScheduler = ptpScheduler; + } + + public PtgScheduler getPtgScheduler() { + return ptgScheduler; + } + + public void setPtgScheduler(PtgScheduler ptgScheduler) { + this.ptgScheduler = ptgScheduler; + } + + public BolScheduler getBolScheduler() { + return bolScheduler; + } + + public void setBolScheduler(BolScheduler bolScheduler) { + this.bolScheduler = bolScheduler; + } + + public boolean isSanityChecksEnabled() { + return 
sanityChecksEnabled; + } + + public void setSanityChecksEnabled(boolean sanityChecksEnabled) { + this.sanityChecksEnabled = sanityChecksEnabled; + } + + public ExtraslashesSettings getExtraslashes() { + return extraslashes; + } + + public void setExtraslashes(ExtraslashesSettings extraslashes) { + this.extraslashes = extraslashes; + } + + public SynchLsSettings getSynchLs() { + return synchLs; + } + + public void setSynchLs(SynchLsSettings synchLs) { + this.synchLs = synchLs; + } + + public PinlifetimeSettings getPinlifetime() { + return pinlifetime; + } + + public void setPinlifetime(PinlifetimeSettings pinlifetime) { + this.pinlifetime = pinlifetime; + } + + public boolean isSkipPtgAclSetup() { + return skipPtgAclSetup; + } + + public void setSkipPtgAclSetup(boolean skipPtgAclSetup) { + this.skipPtgAclSetup = skipPtgAclSetup; + } + + public AdvancedFileSettings getFiles() { + return files; + } + + public void setFiles(AdvancedFileSettings files) { + this.files = files; + } + + public AdvancedDirectorySettings getDirectories() { + return directories; + } + + public void setDirectories(AdvancedDirectorySettings directories) { + this.directories = directories; + } + + public HearthbeatSettings getHearthbeat() { + return hearthbeat; + } + + public void setHearthbeat(HearthbeatSettings hearthbeat) { + this.hearthbeat = hearthbeat; + } + + public int getInfoQuotaRefreshPeriod() { + return infoQuotaRefreshPeriod; + } + + public void setInfoQuotaRefreshPeriod(int infoQuotaRefreshPeriod) { + this.infoQuotaRefreshPeriod = infoQuotaRefreshPeriod; + } + + public String getHttpTurlPrefix() { + return httpTurlPrefix; + } + + public void setHttpTurlPrefix(String httpTurlPrefix) { + this.httpTurlPrefix = httpTurlPrefix; + } + + public long getServerPoolStatusCheckTimeout() { + return serverPoolStatusCheckTimeout; + } + + public void setServerPoolStatusCheckTimeout(long serverPoolStatusCheckTimeout) { + this.serverPoolStatusCheckTimeout = serverPoolStatusCheckTimeout; + } + + 
public int getAbortMaxloop() { + return abortMaxloop; + } + + public void setAbortMaxloop(int abortMaxloop) { + this.abortMaxloop = abortMaxloop; + } + + public String getPingPropertiesFilename() { + return pingPropertiesFilename; + } + + public void setPingPropertiesFilename(String pingPropertiesFilename) { + this.pingPropertiesFilename = pingPropertiesFilename; + } + + public Site getSite() { + return site; + } + + public void setSite(Site site) { + this.site = site; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/SynchLsSettings.java b/src/main/java/it/grid/storm/config/model/v2/SynchLsSettings.java new file mode 100644 index 000000000..f72e4073a --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/SynchLsSettings.java @@ -0,0 +1,73 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.LS_DEFAULT_ALL_LEVEL_RECURSIVE; +import static it.grid.storm.config.ConfigurationDefaults.LS_DEFAULT_NUM_OF_LEVELS; +import static it.grid.storm.config.ConfigurationDefaults.LS_DEFAULT_OFFSET; +import static it.grid.storm.config.ConfigurationDefaults.LS_MAX_NUMBER_OF_ENTRY; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class SynchLsSettings { + + private int maxEntries; + private boolean defaultAllLevelRecursive; + private short defaultNumLevels; + private short defaultOffset; + + public SynchLsSettings() { + maxEntries = LS_MAX_NUMBER_OF_ENTRY; + defaultAllLevelRecursive = LS_DEFAULT_ALL_LEVEL_RECURSIVE; + defaultNumLevels = LS_DEFAULT_NUM_OF_LEVELS; + defaultOffset = LS_DEFAULT_OFFSET; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("SynchLsSettings [maxEntries="); + builder.append(maxEntries); + builder.append(", defaultAllLevelRecursive="); + builder.append(defaultAllLevelRecursive); + 
builder.append(", defaultNumLevels="); + builder.append(defaultNumLevels); + builder.append(", defaultOffset="); + builder.append(defaultOffset); + builder.append("]"); + return builder.toString(); + } + + public int getMaxEntries() { + return maxEntries; + } + + public void setMaxEntries(int maxEntries) { + this.maxEntries = maxEntries; + } + + public boolean isDefaultAllLevelRecursive() { + return defaultAllLevelRecursive; + } + + public void setDefaultAllLevelRecursive(boolean defaultAllLevelRecursive) { + this.defaultAllLevelRecursive = defaultAllLevelRecursive; + } + + public short getDefaultNumLevels() { + return defaultNumLevels; + } + + public void setDefaultNumLevels(short defaultNumLevels) { + this.defaultNumLevels = defaultNumLevels; + } + + public short getDefaultOffset() { + return defaultOffset; + } + + public void setDefaultOffset(short defaultOffset) { + this.defaultOffset = defaultOffset; + } + +} diff --git a/src/main/java/it/grid/storm/config/model/v2/XmlRpcServer.java b/src/main/java/it/grid/storm/config/model/v2/XmlRpcServer.java new file mode 100644 index 000000000..f704a5157 --- /dev/null +++ b/src/main/java/it/grid/storm/config/model/v2/XmlRpcServer.java @@ -0,0 +1,59 @@ +package it.grid.storm.config.model.v2; + +import static it.grid.storm.config.ConfigurationDefaults.XMLRPC_MAX_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.XMLRPC_MAX_THREADS; +import static it.grid.storm.config.ConfigurationDefaults.XMLRPC_SERVER_PORT; + +import com.fasterxml.jackson.databind.PropertyNamingStrategy; +import com.fasterxml.jackson.databind.annotation.JsonNaming; + +@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy.class) +public class XmlRpcServer { + + private int port; + private int maxThreads; + private int maxQueueSize; + + public XmlRpcServer() { + port = XMLRPC_SERVER_PORT; + maxThreads = XMLRPC_MAX_THREADS; + maxQueueSize = XMLRPC_MAX_QUEUE_SIZE; + } + + public void setMaxThreads(int maxThreads) { + this.maxThreads = 
maxThreads > 0 ? maxThreads : XMLRPC_MAX_THREADS; + } + + public void setMaxQueueSize(int maxQueueSize) { + this.maxQueueSize = maxQueueSize > 0 ? maxQueueSize : XMLRPC_MAX_QUEUE_SIZE; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("XmlRpcServer [port="); + builder.append(port); + builder.append(", maxThreads="); + builder.append(maxThreads); + builder.append(", maxQueueSize="); + builder.append(maxQueueSize); + builder.append("]"); + return builder.toString(); + } + + public int getPort() { + return port; + } + + public void setPort(int port) { + this.port = port; + } + + public int getMaxThreads() { + return maxThreads; + } + + public int getMaxQueueSize() { + return maxQueueSize; + } +} diff --git a/src/main/java/it/grid/storm/filesystem/AclLockPool.java b/src/main/java/it/grid/storm/filesystem/AclLockPool.java index 8f1fbe1b1..a4a15050f 100644 --- a/src/main/java/it/grid/storm/filesystem/AclLockPool.java +++ b/src/main/java/it/grid/storm/filesystem/AclLockPool.java @@ -31,8 +31,6 @@ package it.grid.storm.filesystem; -import it.grid.storm.filesystem.AclLockPoolElement; - import java.util.Map; import java.util.concurrent.ConcurrentHashMap; diff --git a/src/main/java/it/grid/storm/filesystem/AclLockPoolElement.java b/src/main/java/it/grid/storm/filesystem/AclLockPoolElement.java index a2259ea70..24b7ae0ba 100644 --- a/src/main/java/it/grid/storm/filesystem/AclLockPoolElement.java +++ b/src/main/java/it/grid/storm/filesystem/AclLockPoolElement.java @@ -38,57 +38,60 @@ * Usage-counted semaphore object. * *

- * Each {@link #incrementUsageCountAndReturnSelf()} request increments the usage - * counter, and each {@link #decrementUsageCountAndGetIt()} request decrements - * it. + * Each {@link #incrementUsageCountAndReturnSelf()} request increments the usage counter, and each + * {@link #decrementUsageCountAndGetIt()} request decrements it. * * @author Riccardo Murri * @version $Revision: 1.5 $ */ class AclLockPoolElement extends Semaphore { - // ---- constructors ---- + // ---- constructors ---- - /** - * Default constructor. The semaphore is initialized for allowing only 1 - * permit at a time (thus serializing accesses through the acquire() and - * release() calls), and with the default fairness setting. The usage count is - * initialized to 0. - * - * @see java.util.concurrent.AtomicInteger; - * @see java.util.concurrent.Semaphore; - */ - public AclLockPoolElement() { + /** + * + */ + private static final long serialVersionUID = 1L; - super(1); - usageCount = new AtomicInteger(); - } + /** + * Default constructor. The semaphore is initialized for allowing only 1 permit at a time (thus + * serializing accesses through the acquire() and release() calls), and with the default fairness + * setting. The usage count is initialized to 0. + * + * @see java.util.concurrent.AtomicInteger; + * @see java.util.concurrent.Semaphore; + */ + public AclLockPoolElement() { - // --- public methods --- + super(1); + usageCount = new AtomicInteger(); + } - /** - * Return the lock object associated with the given file name, or create a new - * one if no mapping for the given path name is already in this map. - */ - public void incrementUsageCount() { + // --- public methods --- - usageCount.incrementAndGet(); - } + /** + * Return the lock object associated with the given file name, or create a new one if no mapping + * for the given path name is already in this map. + */ + public void incrementUsageCount() { - /** Return the stored usage count. 
*/ - public int getUsageCount() { + usageCount.incrementAndGet(); + } - return usageCount.intValue(); - } + /** Return the stored usage count. */ + public int getUsageCount() { - /** Decrement the stored usage count. */ - public int decrementUsageCountAndGetIt() { + return usageCount.intValue(); + } - return usageCount.decrementAndGet(); - } + /** Decrement the stored usage count. */ + public int decrementUsageCountAndGetIt() { - // --- private instance variables --- // + return usageCount.decrementAndGet(); + } - /** Usage counter. */ - private final AtomicInteger usageCount; + // --- private instance variables --- // + + /** Usage counter. */ + private final AtomicInteger usageCount; } diff --git a/src/main/java/it/grid/storm/filesystem/CannotGiveAway.java b/src/main/java/it/grid/storm/filesystem/CannotGiveAway.java deleted file mode 100644 index ea811defd..000000000 --- a/src/main/java/it/grid/storm/filesystem/CannotGiveAway.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * @file CannotGiveAway.java - * @author Riccardo Murri - * - * Source file for class CannotGiveAway - * - */ -/* - * Copyright (c) 2006, Riccardo Murri for the - * EGRID/INFN joint project StoRM. - * - * You may copy, distribute and modify this file under the terms of the - * LICENSE.txt file at the root of the StoRM backend source tree. 
- * - * $Id: CannotGiveAway.java,v 1.1 2006/03/31 13:35:01 rmurri Exp $ - */ - -package it.grid.storm.filesystem; - -/** - * Thrown when the StoRM process has insufficient privileges to change ownership - * of a file. - * - * Ownership change is a privileged operation on most POSIX systems, which - * usually requires "root" privileges. - * - * @author Riccardo Murri - * @version $Revision: 1.1 $ - */ -public class CannotGiveAway extends FilesystemError { - - public CannotGiveAway(final String msg) { - - super(msg); - } -} diff --git a/src/main/java/it/grid/storm/filesystem/FSException.java b/src/main/java/it/grid/storm/filesystem/FSException.java index d46bb18d5..6e96acceb 100644 --- a/src/main/java/it/grid/storm/filesystem/FSException.java +++ b/src/main/java/it/grid/storm/filesystem/FSException.java @@ -19,24 +19,29 @@ public class FSException extends Exception { - public FSException() { + /** + * + */ + private static final long serialVersionUID = 1L; - super(); - } + public FSException() { - public FSException(String message) { + super(); + } - super(message); - } + public FSException(String message) { - public FSException(String message, Throwable cause) { + super(message); + } - super(message, cause); - } + public FSException(String message, Throwable cause) { - public FSException(Throwable cause) { + super(message, cause); + } - super(cause); - } + public FSException(Throwable cause) { + + super(cause); + } } diff --git a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerException.java b/src/main/java/it/grid/storm/filesystem/FileSystemCheckerException.java index abe4d8514..dda81d33c 100644 --- a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerException.java +++ b/src/main/java/it/grid/storm/filesystem/FileSystemCheckerException.java @@ -22,9 +22,14 @@ */ public class FileSystemCheckerException extends Exception { - public FileSystemCheckerException(String message) { + /** + * + */ + private static final long serialVersionUID = 1L; - 
super(message); + public FileSystemCheckerException(String message) { - } + super(message); + + } } diff --git a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerFactory.java b/src/main/java/it/grid/storm/filesystem/FileSystemCheckerFactory.java deleted file mode 100644 index ebef734b7..000000000 --- a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerFactory.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. 
- */ -package it.grid.storm.filesystem; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * @author Michele Dibenedetto - */ -public class FileSystemCheckerFactory { - - private static Logger log = LoggerFactory - .getLogger(FileSystemCheckerFactory.class); - - public enum FileSystemCheckerType { - Mtab, Mounts - } - - private final FileSystemCheckerType chosenType; - private static final FileSystemCheckerType defaultType = FileSystemCheckerType.Mtab; - - private static FileSystemCheckerFactory instance = null; - - /** - * @param type - */ - private FileSystemCheckerFactory(FileSystemCheckerType type) { - - chosenType = type; - } - - /** - * Singleton getter method - * - * @return the class instance already created by a call to the init method, - * creates a new one using the defaultType otherwise - */ - public static FileSystemCheckerFactory getInstance() { - - if (instance == null) { - log - .info("FileSystemCheckerFactory not explicitaly initialized, " - + "using default checker type :{}", defaultType); - init(defaultType); - } - return instance; - } - - /** - * Initializes the class by creating the singleton instance if not already - * done, does nothing if it already exists and has the chosenType is the same - * as the provided FileSystemCheckerType throws an IllegalStateException - * otherwise - * - * @param type - * @throws IllegalArgumentException - * if type is null - * @throws IllegalStateException - * if class already initialized with a different - * FileSystemCheckerType - */ - public static void init(FileSystemCheckerType type) - throws IllegalArgumentException, IllegalStateException { - - if (type == null) { - log - .error("Unable to init FileSystemCheckerFactory. 
Received null FileSystemCheckerType parameter!"); - throw new IllegalArgumentException( - "Received null FileSystemCheckerType parameter!"); - } - if (instance == null) { - instance = new FileSystemCheckerFactory(type); - } else { - if (!instance.chosenType.equals(type)) { - log - .warn("FileSystemCheckerFactory already initialized for {}. " - + "Cannot initialize it again for {}.", - instance.chosenType, - type); - throw new IllegalStateException( - "Asked to initialize the already initialized FileSystemCheckerFactory " - + "with FileSystemCheckerType " + type - + ". Current FileSystemCheckerType is " + instance.chosenType); - } else { - log - .info("Asked to re-initialize the already initialized FileSystemCheckerFactory, nothing to do"); - } - } - } - - /** - * Creates the proper FileSystemChecker implementation object using the - * chosenType available attribute - * - * @return - */ - public FileSystemChecker createFileSystemChecker() - throws IllegalStateException, FileSystemCheckerException { - - switch (this.chosenType) { - case Mtab: - return FileSystemCheckerMtabMonolithic.getInstance(); - case Mounts: - return FileSystemCheckerMountsMonolithic.getInstance(); - default: - log - .error("No correct FileSystemChecker setted : " - + this.chosenType - + " unable to create the FileSystemChecker. Available FileSystemCheckerType : " - + FileSystemCheckerFactory.FileSystemCheckerType.values() - + " Please contact StoRM developers"); - throw new IllegalStateException( - "No correct FileSystemCheckerType setted : " + this.chosenType - + ". 
Available FileSystemCheckerType : " - + FileSystemCheckerFactory.FileSystemCheckerType.values() - + " Please contact StoRM developers"); - } - } - -} diff --git a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerFromFile.java b/src/main/java/it/grid/storm/filesystem/FileSystemCheckerFromFile.java index c62694507..09fffd097 100644 --- a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerFromFile.java +++ b/src/main/java/it/grid/storm/filesystem/FileSystemCheckerFromFile.java @@ -32,163 +32,154 @@ */ abstract class FileSystemCheckerFromFile implements FileSystemChecker { - private final Logger log; - private List GPFSMountPoints = null; - private long initInstant = 0L; - private static final String GPFS_FILESYSTEM_NAME = "gpfs"; - - protected FileSystemCheckerFromFile(Logger log) { - this.log = log; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.filesystem.FileSystemChecker#isGPFS(java.io.File) - */ - public boolean isGPFS(File file) throws IllegalArgumentException, - FileSystemCheckerException { - - if (file == null) { - log.error("IsGPFS method invoked with null File parameter!"); - throw new IllegalArgumentException("Provided null File argument"); - } - tryInit(); - return this.evaluate(file.getAbsolutePath()); - } - - protected synchronized void tryInit() throws FileSystemCheckerException { - - if (this.refreshNeeded()) { - this.init(); - } - } - - /** - * Checks is /etc/mtab file has been modified since last initialization - * - * @return true if a call of init() method is needed - */ - private boolean refreshNeeded() { - - boolean response = false; - if (initInstant == 0L - || initInstant < new File(getFilePath()).lastModified()) { - response = true; - } - return response; - } - - /** - * Initializes the object setting /etc/mtab parsing instant and the list of - * GPFS mount points - */ - private void init() throws FileSystemCheckerException { - - this.initInstant = Calendar.getInstance().getTimeInMillis(); - this.GPFSMountPoints = 
listGPFSMountPoints(); - } - - /** - * Checks if file path filePath belongs to one of the stored GPFS mount points - * - * @param filePath - * the file path to be checked - * - * @return true if file path filePath is on a GPFS mount points - */ - private synchronized boolean evaluate(String filePath) { - - boolean response = false; - for (String GPFSMountPoint : this.GPFSMountPoints) { - if (filePath.startsWith(GPFSMountPoint)) { - response = true; - break; - } - } - return response; - } - - /** - * Parse /etc/mtab file and retrieves all GPFS mount points - * - * @return a list of GPFS mount points - */ - private List listGPFSMountPoints() throws FileSystemCheckerException { - - LinkedList mountPointList = new LinkedList(); - BufferedReader mtab; - try { - mtab = new BufferedReader(new FileReader(getFilePath())); - } catch (FileNotFoundException e) { - log.error(e.getMessage(),e); - - throw new FileSystemCheckerException( - "Error while trying to create a reader for mtab file at " - + getFilePath() + ". FileNotFoundException : " + e.getMessage()); - } - String line; - try { - while ((line = mtab.readLine()) != null) { - if (this.skipLine(line)) { - continue; - } - LinkedList elementsList = tokenizeLine(line); - if (elementsList.get(getFsNameIndex()).equals(GPFS_FILESYSTEM_NAME)) { - mountPointList.add(elementsList.get(getMountPointIndex())); - } - } - } catch (IOException e) { - log.error(e.getMessage(), e); - throw new FileSystemCheckerException( - "Error while trying to read mtab file at " + getFilePath() - + ". 
IOException : " + e.getMessage()); - } - return mountPointList; - } - - /** - * Provides the path of file containing GPFS mount points - * - * @return the path of a file containing GPFS mount points - */ - protected abstract String getFilePath(); - - /** - * Provides the index of file system name in the list provided by method - * tokenizeLine - * - * @return the index of file system name in a tokenized list - */ - protected abstract int getFsNameIndex(); - - /** - * Provides the index of file mount point in the list provided by method - * tokenizeLine - * - * @return the index of file mount point in a tokenized list - */ - protected abstract int getMountPointIndex(); - - /** - * Tokenizes a line putting in a list all the strings from the line related to - * mounted partitions - * - * @param line - * a line from mounted partitions file containing informations about - * mounted partition - * @return a list of strings containing space-free informations about mounted - * partitions - */ - protected abstract LinkedList tokenizeLine(String line); - - /** - * Checks if the provided line has to be skipped because contains information - * not concerning to mounted partition - * - * @param a - * string line from mounted partitions file - * @return true if the line has to be skipped, true otherwise - */ - protected abstract boolean skipLine(String line); -} \ No newline at end of file + private final Logger log; + private List GPFSMountPoints = null; + private long initInstant = 0L; + private static final String GPFS_FILESYSTEM_NAME = "gpfs"; + + protected FileSystemCheckerFromFile(Logger log) { + this.log = log; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.filesystem.FileSystemChecker#isGPFS(java.io.File) + */ + public boolean isGPFS(File file) throws IllegalArgumentException, FileSystemCheckerException { + + if (file == null) { + log.error("IsGPFS method invoked with null File parameter!"); + throw new IllegalArgumentException("Provided null File argument"); + } 
+ tryInit(); + return this.evaluate(file.getAbsolutePath()); + } + + protected synchronized void tryInit() throws FileSystemCheckerException { + + if (this.refreshNeeded()) { + this.init(); + } + } + + /** + * Checks is /etc/mtab file has been modified since last initialization + * + * @return true if a call of init() method is needed + */ + private boolean refreshNeeded() { + + boolean response = false; + if (initInstant == 0L || initInstant < new File(getFilePath()).lastModified()) { + response = true; + } + return response; + } + + /** + * Initializes the object setting /etc/mtab parsing instant and the list of GPFS mount points + */ + private void init() throws FileSystemCheckerException { + + this.initInstant = Calendar.getInstance().getTimeInMillis(); + this.GPFSMountPoints = listGPFSMountPoints(); + } + + /** + * Checks if file path filePath belongs to one of the stored GPFS mount points + * + * @param filePath the file path to be checked + * + * @return true if file path filePath is on a GPFS mount points + */ + private synchronized boolean evaluate(String filePath) { + + boolean response = false; + for (String GPFSMountPoint : this.GPFSMountPoints) { + if (filePath.startsWith(GPFSMountPoint)) { + response = true; + break; + } + } + return response; + } + + /** + * Parse /etc/mtab file and retrieves all GPFS mount points + * + * @return a list of GPFS mount points + */ + private List listGPFSMountPoints() throws FileSystemCheckerException { + + LinkedList mountPointList = new LinkedList(); + BufferedReader mtab = null; + try { + mtab = new BufferedReader(new FileReader(getFilePath())); + String line; + while ((line = mtab.readLine()) != null) { + if (this.skipLine(line)) { + continue; + } + LinkedList elementsList = tokenizeLine(line); + if (elementsList.get(getFsNameIndex()).equals(GPFS_FILESYSTEM_NAME)) { + mountPointList.add(elementsList.get(getMountPointIndex())); + } + } + } catch (FileNotFoundException e) { + log.error(e.getMessage(), e); + + throw 
new FileSystemCheckerException("Error while trying to create a reader for mtab file at " + + getFilePath() + ". FileNotFoundException : " + e.getMessage()); + } catch (IOException e) { + log.error(e.getMessage(), e); + throw new FileSystemCheckerException("Error while trying to read mtab file at " + + getFilePath() + ". IOException : " + e.getMessage()); + } finally { + try { + mtab.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + return mountPointList; + } + + /** + * Provides the path of file containing GPFS mount points + * + * @return the path of a file containing GPFS mount points + */ + protected abstract String getFilePath(); + + /** + * Provides the index of file system name in the list provided by method tokenizeLine + * + * @return the index of file system name in a tokenized list + */ + protected abstract int getFsNameIndex(); + + /** + * Provides the index of file mount point in the list provided by method tokenizeLine + * + * @return the index of file mount point in a tokenized list + */ + protected abstract int getMountPointIndex(); + + /** + * Tokenizes a line putting in a list all the strings from the line related to mounted partitions + * + * @param line a line from mounted partitions file containing informations about mounted partition + * @return a list of strings containing space-free informations about mounted partitions + */ + protected abstract LinkedList tokenizeLine(String line); + + /** + * Checks if the provided line has to be skipped because contains information not concerning to + * mounted partition + * + * @param a string line from mounted partitions file + * @return true if the line has to be skipped, true otherwise + */ + protected abstract boolean skipLine(String line); +} diff --git a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMounts.java b/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMounts.java deleted file mode 100644 index 769ea1e5b..000000000 --- 
a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMounts.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package it.grid.storm.filesystem; - -import java.util.Arrays; -import java.util.LinkedList; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * @author Michele Dibenedetto - * - */ -public class FileSystemCheckerMounts extends FileSystemCheckerFromFile { - - private static final Logger log = LoggerFactory - .getLogger(FileSystemCheckerMounts.class); - private static final String MOUNTS_FILE_PATH = "/proc/mounts"; - - private static final FileSystemCheckerMounts instance = new FileSystemCheckerMounts(); - - /** - * Singleton private constructor - */ - private FileSystemCheckerMounts() { - - super(log); - } - - /** - * Singleton instance getter. 
initialize the instance if needed - * - * @return singleton instance - */ - public static FileSystemCheckerMounts getInstance() - throws FileSystemCheckerException { - - instance.tryInit(); - return instance; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.filesystem.FileSystemCheckerFromFile#getFilePath() - */ - @Override - protected String getFilePath() { - - return MOUNTS_FILE_PATH; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.filesystem.FileSystemCheckerFromFile#getFsNameIndex() - */ - @Override - protected int getFsNameIndex() { - - return 2; - } - - /* - * (non-Javadoc) - * - * @see - * it.grid.storm.filesystem.FileSystemCheckerFromFile#getMountPointIndex() - */ - @Override - protected int getMountPointIndex() { - - return 1; - } - - /* - * (non-Javadoc) - * - * @see - * it.grid.storm.filesystem.FileSystemCheckerFromFile#skipLine(java.lang.String - * ) - */ - @Override - protected boolean skipLine(String line) { - - if (line.startsWith("#") || !line.startsWith("/dev/")) { - return true; - } - return false; - } - - /* - * (non-Javadoc) - * - * @see - * it.grid.storm.filesystem.FileSystemCheckerFromFile#tokenizeLine(java.lang - * .String) - */ - @Override - protected LinkedList tokenizeLine(String line) { - - String[] elementsArray = line.split(" "); - LinkedList elementsList = new LinkedList( - Arrays.asList(elementsArray)); - while (elementsList.remove("")) { - } - return elementsList; - } -} diff --git a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMountsMonolithic.java b/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMountsMonolithic.java index 44d8c1360..69d786764 100644 --- a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMountsMonolithic.java +++ b/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMountsMonolithic.java @@ -34,157 +34,152 @@ */ public class FileSystemCheckerMountsMonolithic implements FileSystemChecker { - private static final Logger log = LoggerFactory - 
.getLogger(FileSystemCheckerMountsMonolithic.class); - - private static final String GPFS_FILESYSTEM_NAME = "gpfs"; - private static final String MOUNTS_FILE_PATH = "/proc/mounts"; - - private static FileSystemCheckerMountsMonolithic instance = new FileSystemCheckerMountsMonolithic(); - private List GPFSMountPoints = null; - private long initInstant = 0L; - - /** - * Singleton private constructor - */ - private FileSystemCheckerMountsMonolithic() { - - super(); - } - - /** - * Singleton instance getter. initialize the instance if needed - * - * @return singleton instance - */ - public static FileSystemCheckerMountsMonolithic getInstance() - throws FileSystemCheckerException { - - synchronized (instance) { - if (instance.refreshNeeded()) { - instance.init(); - } - } - return instance; - } - - /** - * Initializes the object setting /etc/mtab parsing instant and the list of - * GPFS mount points - */ - private synchronized void init() throws FileSystemCheckerException { - - this.initInstant = Calendar.getInstance().getTimeInMillis(); - this.GPFSMountPoints = listGPFSMountPoints(); - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.filesystem.FileSystemChecker#isGPFS(java.io.File) - */ - public boolean isGPFS(File file) throws IllegalArgumentException, - FileSystemCheckerException { - - if (file == null) { - log.error("IsGPFS method invoked with null File parameter!"); - throw new IllegalArgumentException("Provided null File argument"); - } - synchronized (instance) { - if (refreshNeeded()) { - this.init(); - } - } - return this.evaluate(file.getAbsolutePath()); - } - - /** - * Checks is /etc/mtab file has been modified since last initialization - * - * @return true if a call of init() method is needed - */ - private synchronized boolean refreshNeeded() { - - boolean response = false; - if (initInstant == 0L - || initInstant < new File(MOUNTS_FILE_PATH).lastModified()) { - response = true; - } - return response; - } - - /** - * Checks if file path filePath belongs 
to one of the stored GPFS mount points - * - * @param filePath - * the file path to be checked - * - * @return true if file path filePath is on a GPFS mount points - */ - private boolean evaluate(String filePath) { - - boolean response = false; - for (String GPFSMountPoint : this.GPFSMountPoints) { - if (filePath.startsWith(GPFSMountPoint)) { - response = true; - break; - } - } - return response; - } - - /** - * Parse /etc/mtab file and retrieves all GPFS mount points - * - * @return a list of GPFS mount points - */ - private static List listGPFSMountPoints() - throws FileSystemCheckerException { - - LinkedList mountPointList = new LinkedList(); - BufferedReader mtab; - try { - mtab = new BufferedReader(new FileReader(MOUNTS_FILE_PATH)); - } catch (FileNotFoundException e) { - log.error(e.getMessage(), e); - throw new FileSystemCheckerException( - "Error while trying to create a reader for mtab file at " - + MOUNTS_FILE_PATH + ". FileNotFoundException : " + e.getMessage()); - } - String line; - try { - while ((line = mtab.readLine()) != null) { - if (line.startsWith("#") || !line.startsWith("/dev/")) { - continue; - } - LinkedList elementsList = tokenizeLine(line); - if (elementsList.get(2).equals(GPFS_FILESYSTEM_NAME)) { - mountPointList.add(elementsList.get(1)); - } - } - } catch (IOException e) { - log.error(e.getMessage(), e); - throw new FileSystemCheckerException( - "Error while trying to read mtab file at " + MOUNTS_FILE_PATH - + ". 
IOException : " + e.getMessage()); - } - return mountPointList; - } - - /** - * Transform the received string in a list of non spaced strings - * - * @param line - * a string - * @return a list of strings without spaces - */ - private static LinkedList tokenizeLine(String line) { - - String[] elementsArray = line.split(" "); - LinkedList elementsList = new LinkedList( - Arrays.asList(elementsArray)); - while (elementsList.remove("")) { - } - return elementsList; - } + private static final Logger log = + LoggerFactory.getLogger(FileSystemCheckerMountsMonolithic.class); + + private static final String GPFS_FILESYSTEM_NAME = "gpfs"; + private static final String MOUNTS_FILE_PATH = "/proc/mounts"; + + private static FileSystemCheckerMountsMonolithic instance = + new FileSystemCheckerMountsMonolithic(); + private List GPFSMountPoints = null; + private long initInstant = 0L; + + /** + * Singleton private constructor + */ + private FileSystemCheckerMountsMonolithic() { + + super(); + } + + /** + * Singleton instance getter. 
initialize the instance if needed + * + * @return singleton instance + */ + public static FileSystemCheckerMountsMonolithic getInstance() throws FileSystemCheckerException { + + synchronized (instance) { + if (instance.refreshNeeded()) { + instance.init(); + } + } + return instance; + } + + /** + * Initializes the object setting /etc/mtab parsing instant and the list of GPFS mount points + */ + private synchronized void init() throws FileSystemCheckerException { + + this.initInstant = Calendar.getInstance().getTimeInMillis(); + this.GPFSMountPoints = listGPFSMountPoints(); + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.filesystem.FileSystemChecker#isGPFS(java.io.File) + */ + public boolean isGPFS(File file) throws IllegalArgumentException, FileSystemCheckerException { + + if (file == null) { + log.error("IsGPFS method invoked with null File parameter!"); + throw new IllegalArgumentException("Provided null File argument"); + } + synchronized (instance) { + if (refreshNeeded()) { + this.init(); + } + } + return this.evaluate(file.getAbsolutePath()); + } + + /** + * Checks is /etc/mtab file has been modified since last initialization + * + * @return true if a call of init() method is needed + */ + private synchronized boolean refreshNeeded() { + + boolean response = false; + if (initInstant == 0L || initInstant < new File(MOUNTS_FILE_PATH).lastModified()) { + response = true; + } + return response; + } + + /** + * Checks if file path filePath belongs to one of the stored GPFS mount points + * + * @param filePath the file path to be checked + * + * @return true if file path filePath is on a GPFS mount points + */ + private boolean evaluate(String filePath) { + + boolean response = false; + for (String GPFSMountPoint : this.GPFSMountPoints) { + if (filePath.startsWith(GPFSMountPoint)) { + response = true; + break; + } + } + return response; + } + + /** + * Parse /etc/mtab file and retrieves all GPFS mount points + * + * @return a list of GPFS mount points + */ 
+ private static List listGPFSMountPoints() throws FileSystemCheckerException { + + LinkedList mountPointList = new LinkedList(); + BufferedReader mtab = null; + try { + mtab = new BufferedReader(new FileReader(MOUNTS_FILE_PATH)); + String line; + while ((line = mtab.readLine()) != null) { + if (line.startsWith("#") || !line.startsWith("/dev/")) { + continue; + } + LinkedList elementsList = tokenizeLine(line); + if (elementsList.get(2).equals(GPFS_FILESYSTEM_NAME)) { + mountPointList.add(elementsList.get(1)); + } + } + } catch (FileNotFoundException e) { + log.error(e.getMessage(), e); + throw new FileSystemCheckerException("Error while trying to create a reader for mtab file at " + + MOUNTS_FILE_PATH + ". FileNotFoundException : " + e.getMessage()); + } catch (IOException e) { + log.error(e.getMessage(), e); + throw new FileSystemCheckerException("Error while trying to read mtab file at " + + MOUNTS_FILE_PATH + ". IOException : " + e.getMessage()); + } finally { + try { + mtab.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + return mountPointList; + } + + /** + * Transform the received string in a list of non spaced strings + * + * @param line a string + * @return a list of strings without spaces + */ + private static LinkedList tokenizeLine(String line) { + + String[] elementsArray = line.split(" "); + LinkedList elementsList = new LinkedList(Arrays.asList(elementsArray)); + while (elementsList.remove("")) { + } + return elementsList; + } } diff --git a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMtab.java b/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMtab.java deleted file mode 100644 index 70280ee05..000000000 --- a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMtab.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package it.grid.storm.filesystem; - -import java.util.LinkedList; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * @author Michele Dibenedetto - * - */ -public class FileSystemCheckerMtab extends FileSystemCheckerFromFile { - - private static final Logger log = LoggerFactory - .getLogger(FileSystemCheckerMtab.class); - - private static FileSystemCheckerMtab instance = new FileSystemCheckerMtab(); - - /** - * Singleton private constructor - */ - private FileSystemCheckerMtab() { - - super(log); - } - - /** - * Singleton instance getter. 
initialize the instance if needed - * - * @return singleton instance - */ - public static FileSystemCheckerMtab getInstance() - throws FileSystemCheckerException { - - instance.tryInit(); - return instance; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.filesystem.FileSystemCheckerFromFile#getFilePath() - */ - @Override - protected String getFilePath() { - - return MtabUtil.getFilePath(); - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.filesystem.FileSystemCheckerFromFile#getFsNameIndex() - */ - @Override - protected int getFsNameIndex() { - - return MtabUtil.getFsNameIndex(); - } - - /* - * (non-Javadoc) - * - * @see - * it.grid.storm.filesystem.FileSystemCheckerFromFile#getMountPointIndex() - */ - @Override - protected int getMountPointIndex() { - - return MtabUtil.getMountPointIndex(); - } - - /* - * (non-Javadoc) - * - * @see - * it.grid.storm.filesystem.FileSystemCheckerFromFile#skipLine(java.lang.String - * ) - */ - @Override - protected boolean skipLine(String line) { - - return MtabUtil.skipLineForMountPoints(line); - } - - /* - * (non-Javadoc) - * - * @see - * it.grid.storm.filesystem.FileSystemCheckerFromFile#tokenizeLine(java.lang - * .String) - */ - @Override - protected LinkedList tokenizeLine(String line) { - - return MtabUtil.tokenizeLine(line); - } -} diff --git a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMtabMonolithic.java b/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMtabMonolithic.java index 58de37d64..d61269f3b 100644 --- a/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMtabMonolithic.java +++ b/src/main/java/it/grid/storm/filesystem/FileSystemCheckerMtabMonolithic.java @@ -34,157 +34,150 @@ */ public class FileSystemCheckerMtabMonolithic implements FileSystemChecker { - private static final Logger log = LoggerFactory - .getLogger(FileSystemCheckerMtabMonolithic.class); - - private static final String GPFS_FILESYSTEM_NAME = "gpfs"; - private static final String MTAB_FILE_PATH = "/etc/mtab"; - 
- private static FileSystemCheckerMtabMonolithic instance = new FileSystemCheckerMtabMonolithic(); - private List GPFSMountPoints = null; - private long initInstant = 0L; - - /** - * Singleton private constructor - */ - private FileSystemCheckerMtabMonolithic() { - - super(); - } - - /** - * Singleton instance getter. initialize the instance if needed - * - * @return singleton instance - */ - public static FileSystemCheckerMtabMonolithic getInstance() - throws FileSystemCheckerException { - - synchronized (instance) { - if (instance.refreshNeeded()) { - instance.init(); - } - } - return instance; - } - - /** - * Initializes the object setting /etc/mtab parsing instant and the list of - * GPFS mount points - */ - private synchronized void init() throws FileSystemCheckerException { - - this.initInstant = Calendar.getInstance().getTimeInMillis(); - this.GPFSMountPoints = listGPFSMountPoints(); - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.filesystem.FileSystemChecker#isGPFS(java.io.File) - */ - public boolean isGPFS(File file) throws IllegalArgumentException, - FileSystemCheckerException { - - if (file == null) { - log.error("IsGPFS method invoked with null File parameter!"); - throw new IllegalArgumentException("Provided null File argument"); - } - synchronized (instance) { - if (refreshNeeded()) { - this.init(); - } - } - return this.evaluate(file.getAbsolutePath()); - } - - /** - * Checks is /etc/mtab file has been modified since last initialization - * - * @return true if a call of init() method is needed - */ - private synchronized boolean refreshNeeded() { - - boolean response = false; - if (initInstant == 0L - || initInstant < new File(MTAB_FILE_PATH).lastModified()) { - response = true; - } - return response; - } - - /** - * Checks if file path filePath belongs to one of the stored GPFS mount points - * - * @param filePath - * the file path to be checked - * - * @return true if file path filePath is on a GPFS mount points - */ - private boolean 
evaluate(String filePath) { - - boolean response = false; - for (String GPFSMountPoint : this.GPFSMountPoints) { - if (filePath.startsWith(GPFSMountPoint)) { - response = true; - break; - } - } - return response; - } - - /** - * Parse /etc/mtab file and retrieves all GPFS mount points - * - * @return a list of GPFS mount points - */ - private static List listGPFSMountPoints() - throws FileSystemCheckerException { - - LinkedList mountPointList = new LinkedList(); - BufferedReader mtab; - try { - mtab = new BufferedReader(new FileReader(MTAB_FILE_PATH)); - } catch (FileNotFoundException e) { - log.error(e.getMessage(), e); - throw new FileSystemCheckerException( - "Error while trying to create a reader for mtab file at " - + MTAB_FILE_PATH + ". FileNotFoundException : " + e.getMessage()); - } - String line; - try { - while ((line = mtab.readLine()) != null) { - if (line.startsWith("#") || !line.startsWith("/dev/")) { - continue; - } - LinkedList elementsList = tokenizeLine(line); - if (elementsList.get(2).equals(GPFS_FILESYSTEM_NAME)) { - mountPointList.add(elementsList.get(1)); - } - } - } catch (IOException e) { - log.error(e.getMessage(), e); - throw new FileSystemCheckerException( - "Error while trying to read mtab file at " + MTAB_FILE_PATH - + ". 
IOException : " + e.getMessage()); - } - return mountPointList; - } - - /** - * Transform the received string in a list of non spaced strings - * - * @param line - * a string - * @return a list of strings without spaces - */ - private static LinkedList tokenizeLine(String line) { - - String[] elementsArray = line.split(" "); - LinkedList elementsList = new LinkedList( - Arrays.asList(elementsArray)); - while (elementsList.remove("")) { - } - return elementsList; - } + private static final Logger log = LoggerFactory.getLogger(FileSystemCheckerMtabMonolithic.class); + + private static final String GPFS_FILESYSTEM_NAME = "gpfs"; + private static final String MTAB_FILE_PATH = "/etc/mtab"; + + private static FileSystemCheckerMtabMonolithic instance = new FileSystemCheckerMtabMonolithic(); + private List GPFSMountPoints = null; + private long initInstant = 0L; + + /** + * Singleton private constructor + */ + private FileSystemCheckerMtabMonolithic() { + + super(); + } + + /** + * Singleton instance getter. 
initialize the instance if needed + * + * @return singleton instance + */ + public static FileSystemCheckerMtabMonolithic getInstance() throws FileSystemCheckerException { + + synchronized (instance) { + if (instance.refreshNeeded()) { + instance.init(); + } + } + return instance; + } + + /** + * Initializes the object setting /etc/mtab parsing instant and the list of GPFS mount points + */ + private synchronized void init() throws FileSystemCheckerException { + + this.initInstant = Calendar.getInstance().getTimeInMillis(); + this.GPFSMountPoints = listGPFSMountPoints(); + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.filesystem.FileSystemChecker#isGPFS(java.io.File) + */ + public boolean isGPFS(File file) throws IllegalArgumentException, FileSystemCheckerException { + + if (file == null) { + log.error("IsGPFS method invoked with null File parameter!"); + throw new IllegalArgumentException("Provided null File argument"); + } + synchronized (instance) { + if (refreshNeeded()) { + this.init(); + } + } + return this.evaluate(file.getAbsolutePath()); + } + + /** + * Checks is /etc/mtab file has been modified since last initialization + * + * @return true if a call of init() method is needed + */ + private synchronized boolean refreshNeeded() { + + boolean response = false; + if (initInstant == 0L || initInstant < new File(MTAB_FILE_PATH).lastModified()) { + response = true; + } + return response; + } + + /** + * Checks if file path filePath belongs to one of the stored GPFS mount points + * + * @param filePath the file path to be checked + * + * @return true if file path filePath is on a GPFS mount points + */ + private boolean evaluate(String filePath) { + + boolean response = false; + for (String GPFSMountPoint : this.GPFSMountPoints) { + if (filePath.startsWith(GPFSMountPoint)) { + response = true; + break; + } + } + return response; + } + + /** + * Parse /etc/mtab file and retrieves all GPFS mount points + * + * @return a list of GPFS mount points + */ + 
private static List listGPFSMountPoints() throws FileSystemCheckerException { + + LinkedList mountPointList = new LinkedList(); + BufferedReader mtab = null; + try { + mtab = new BufferedReader(new FileReader(MTAB_FILE_PATH)); + String line; + while ((line = mtab.readLine()) != null) { + if (line.startsWith("#") || !line.startsWith("/dev/")) { + continue; + } + LinkedList elementsList = tokenizeLine(line); + if (elementsList.get(2).equals(GPFS_FILESYSTEM_NAME)) { + mountPointList.add(elementsList.get(1)); + } + } + } catch (FileNotFoundException e) { + log.error(e.getMessage(), e); + throw new FileSystemCheckerException("Error while trying to create a reader for mtab file at " + + MTAB_FILE_PATH + ". FileNotFoundException : " + e.getMessage()); + } catch (IOException e) { + log.error(e.getMessage(), e); + throw new FileSystemCheckerException("Error while trying to read mtab file at " + + MTAB_FILE_PATH + ". IOException : " + e.getMessage()); + } finally { + try { + mtab.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + return mountPointList; + } + + /** + * Transform the received string in a list of non spaced strings + * + * @param line a string + * @return a list of strings without spaces + */ + private static LinkedList tokenizeLine(String line) { + + String[] elementsArray = line.split(" "); + LinkedList elementsList = new LinkedList(Arrays.asList(elementsArray)); + while (elementsList.remove("")) { + } + return elementsList; + } } diff --git a/src/main/java/it/grid/storm/filesystem/FilesystemPermission.java b/src/main/java/it/grid/storm/filesystem/FilesystemPermission.java index fbf673de4..37ba77efa 100644 --- a/src/main/java/it/grid/storm/filesystem/FilesystemPermission.java +++ b/src/main/java/it/grid/storm/filesystem/FilesystemPermission.java @@ -34,20 +34,19 @@ import it.grid.storm.filesystem.swig.fs_acl; /** - * Provides an abstraction of all operations that can be performed on a - * filesystem entry (file or directory). 
+ * Provides an abstraction of all operations that can be performed on a filesystem entry (file or + * directory). * *

- * Note: this class is an interface to the fs_acl::permission_t type; if the - * low-level fs_acl::permission_t type ever gets modified, then the {@link - * toFsAclPermission()} method should be modified also. + * Note: this class is an interface to the fs_acl::permission_t type; if the low-level + * fs_acl::permission_t type ever gets modified, then the {@link toFsAclPermission()} method should + * be modified also. * *

- * To all effects, instances of this class are immutable. Permissions - * are read off or enforced onto disk files; they should not be altered by StoRM - * code. Still, if there is need for permission manipulation in StoRM, a - * MutableFilesystemPermission derived class can be provided, that promotes to - * public visibility the deny*() and permit*() methods. + * To all effects, instances of this class are immutable. Permissions are read off or + * enforced onto disk files; they should not be altered by StoRM code. Still, if there is need for + * permission manipulation in StoRM, a MutableFilesystemPermission derived class can be provided, + * that promotes to public visibility the deny*() and permit*() methods. * * * @see it.grid.storm.authorization.AuthorizationQueryInterface @@ -59,447 +58,438 @@ */ public class FilesystemPermission implements java.io.Serializable { - // --- constants used in the bitfield constructor --- // + /** + * + */ + private static final long serialVersionUID = 1L; - /** Permission to execute the file. */ - final static int EXECUTE = fs_acl.permission_flags.PERM_EXECUTE; + // --- constants used in the bitfield constructor --- // - /** Permission to write file contents. */ - final static int WRITE_DATA = fs_acl.permission_flags.PERM_WRITE_DATA; + /** Permission to execute the file. */ + final static int EXECUTE = fs_acl.permission_flags.PERM_EXECUTE; - /** Permission to read file contents. */ - final static int READ_DATA = fs_acl.permission_flags.PERM_READ_DATA; + /** Permission to write file contents. */ + final static int WRITE_DATA = fs_acl.permission_flags.PERM_WRITE_DATA; - /** - * Permission to change file extended ACL (that is, beyond normal UNIX - * permission bits). - */ - final static int WRITE_ACL = fs_acl.permission_flags.PERM_WRITE_ACL; + /** Permission to read file contents. 
*/ + final static int READ_DATA = fs_acl.permission_flags.PERM_READ_DATA; - /** - * Permission to read file extended ACL (that is, beyond normal UNIX - * permission bits). - */ - final static int READ_ACL = fs_acl.permission_flags.PERM_READ_ACL; + /** + * Permission to change file extended ACL (that is, beyond normal UNIX permission bits). + */ + final static int WRITE_ACL = fs_acl.permission_flags.PERM_WRITE_ACL; - /** Permission to delete a filesystem entry (file or directory). */ - final static int DELETE = fs_acl.permission_flags.PERM_DELETE; + /** + * Permission to read file extended ACL (that is, beyond normal UNIX permission bits). + */ + final static int READ_ACL = fs_acl.permission_flags.PERM_READ_ACL; - /** Permission to descend to children directories of a directory. */ - final static int TRAVERSE_DIRECTORY = fs_acl.permission_flags.PERM_TRAVERSE_DIRECTORY; - - /** Permission to list directory contents. */ - final static int LIST_DIRECTORY = fs_acl.permission_flags.PERM_LIST_DIRECTORY; - - /** Permission to create a child subdirectory. */ - final static int CREATE_SUBDIRECTORY = fs_acl.permission_flags.PERM_CREATE_SUBDIRECTORY; - - /** Permission to create a new file. */ - final static int CREATE_FILE = fs_acl.permission_flags.PERM_CREATE_FILE; - - /** Permission to delete a file or directory within a directory. */ - final static int DELETE_CHILD = fs_acl.permission_flags.PERM_DELETE_CHILD; - - /** No permission at all. */ - final static int NONE = fs_acl.permission_flags.PERM_NONE; - - /** All permission bits set. */ - final static int ALL = fs_acl.permission_flags.PERM_ALL; - - // --- public constant instances --- // - - /** Permission to read a file. */ - public final static FilesystemPermission Read = new FilesystemPermission( - READ_DATA); - - /** Permission to read and write to a file. */ - public final static FilesystemPermission ReadWrite = new FilesystemPermission( - READ_DATA | WRITE_DATA); - - /** Permission to list directory contents. 
*/ - public final static FilesystemPermission List = new FilesystemPermission( - LIST_DIRECTORY); - - /** - * Permission to traverse directory (descend path where directory is an - * intermediate step). - */ - public final static FilesystemPermission Traverse = new FilesystemPermission( - TRAVERSE_DIRECTORY); - - /** Permission to list and traverse directory. */ - public final static FilesystemPermission ListTraverse = new FilesystemPermission( - LIST_DIRECTORY | TRAVERSE_DIRECTORY); - - /** Permission to list, traverse and write directory. */ - public final static FilesystemPermission ListTraverseWrite = new FilesystemPermission( - LIST_DIRECTORY | TRAVERSE_DIRECTORY | WRITE_DATA); - - /** No permission at all. */ - public final static FilesystemPermission None = new FilesystemPermission( - FilesystemPermission.NONE); - - /** Permission to write. */ - public final static FilesystemPermission Write = new FilesystemPermission( - FilesystemPermission.WRITE_DATA); - - /** - * Permission to create file - */ - public final static FilesystemPermission Create = new FilesystemPermission( - FilesystemPermission.CREATE_FILE); - - /** - * Permission to create subdirectory - */ - public final static FilesystemPermission CreateSubdirectory = new FilesystemPermission( - FilesystemPermission.CREATE_SUBDIRECTORY); - - /** - * Permission to delete file or directory - */ - public final static FilesystemPermission Delete = new FilesystemPermission( - FilesystemPermission.DELETE); - - /** - * - */ - public final static FilesystemPermission ListDirectory = new FilesystemPermission( - FilesystemPermission.LIST_DIRECTORY); - - // --- constructors --- // - - /** - * Copy constructor. Takes another instance of the - * {@link it.grid.storm.filesyste.FilesystemPermission} interface and creates - * an instance of this class granting exactly the same permissions. 
- */ - public FilesystemPermission(final FilesystemPermission p) { - - this.permission = (p.permission & ALL); - }; - - /** - * Constructor that takes a bitfield of permissions and creates an instance of - * this class granting exactly those permissions. The bitfield - * argument has the same format of the fs_acl::permission_t type, or could be - * constructed by bitwise-OR'ing the READ_DATA, - * WRITE_DATA, ... constants defined elsewhere in this class. For - * any bit that is set in the bitfield argument, the - * corresponding permission will be granted from this object. - * - *

- * Example usage: - * - *

-	 * p = new FilesystemPermission(READ_DATA | WRITE_DATA);
-	 * // p.canReadFile() == true
-	 * // p.canWriteFile() == true
-	 * // p.canCreateNewFile() == false
-	 * 
- * - * @see fs_acl::permission_t - */ - public FilesystemPermission(final int bitfield) { - - this.permission = (bitfield & ALL); - }; - - /** - * Default constructor: creates an instance that denies permission on each and - * every operation. - * - *

- * This constructor's intended usage is in conjunction with the permission - * manipulation functions (in derived classes): - * - *

-	 * p = new FilesystemPermission().permitReadFile().permitWriteFile();
-	 * // p.canReadFile() == true
-	 * // p.canWriteFile() == true
-	 * // p.canCreateNewFile() == false
-	 * 
- */ - protected FilesystemPermission() { - - denyAll(); - } - - // --- permission conversion functions --- // - - /** - * Return an fs_acl::permission_t bitfield representing the same permissions - * that this object encodes. - * - * @see fs_acl::permission_t - */ - public int toFsAclPermission() { - - return permission; - } - - // --- permission test methods --- // - - /** - * Return true if permission is granted to read file contents. - */ - public boolean canReadFile() { - - return 0 != (permission & READ_DATA); - } - - /** - * Return true if permission is granted to write file contents. - * No distinction can be enforced between overwriting contents and appending - * to the file, so no distinction is made here. - */ - public boolean canWriteFile() { - - return 0 != (permission & WRITE_DATA); - } - - /** - * Return true if permission is granted to list directory - * contents. - */ - public boolean canListDirectory() { - - return 0 != (permission & LIST_DIRECTORY); - } - - /** - * Return true if permission is granted to descend to a - * subdirectory. - */ - public boolean canTraverseDirectory() { - - return 0 != (permission & TRAVERSE_DIRECTORY); - } - - /** - * Return true if permission is granted to create a new - * subdirectory. - */ - public boolean canMakeDirectory() { - - return 0 != (permission & CREATE_SUBDIRECTORY); - } - - /** - * Return true if permission is granted to create a new file. - */ - public boolean canCreateNewFile() { - - return 0 != (permission & CREATE_FILE); - } - - /** - * Return true if permission is granted to change filesystem - * entry (file or directory) ACL. - */ - public boolean canChangeAcl() { - - return 0 != (permission & WRITE_ACL); - } - - /** - * Return true if permission is granted to delete entry (file or - * directory). 
- */ - public boolean canDelete() { - - return 0 != (permission & DELETE); - } - - /** - * Return true if all permissions that are granted by - * other FilesystemPermission instance are also granted by this - * instance. That is, test if other is more restrictive than the this - * instance. - */ - public boolean allows(final FilesystemPermission other) { - - return (other.permission == (this.permission & other.permission)); - } - - /** - * Return true if all permission bits that are set in - * bitfield are also set in this instance. That is, test if - * bitfield represents a more restrictive than the this instance. - */ - public boolean allows(final int bitfield) { - - return (bitfield == (this.permission & bitfield)); - } - - // --- permission manipulation methods --- // - - /** - * Change instance status so that all subsequent can... calls - * will return false. - * - *

- * Returns the instance itself, so that calls to the permission manipulation - * functions can be chained: - * - *

-	 * p = new FilesystemPermission();
-	 * p.denyAll().permitReadFile().permitWriteFile();
-	 * // p.canReadFile() == true
-	 * // p.canWriteFile() == true
-	 * // p.canCreateNewFile() == false
-	 * 
- */ - protected FilesystemPermission denyAll() { - - this.permission = NONE; - return this; - } - - protected FilesystemPermission denyReadFile() { - - permission &= ~READ_DATA; - return this; - } - - protected FilesystemPermission denyWriteFile() { + /** Permission to delete a filesystem entry (file or directory). */ + final static int DELETE = fs_acl.permission_flags.PERM_DELETE; - permission &= ~WRITE_DATA; - return this; - } - - protected FilesystemPermission denyChangeAcl() { + /** Permission to descend to children directories of a directory. */ + final static int TRAVERSE_DIRECTORY = fs_acl.permission_flags.PERM_TRAVERSE_DIRECTORY; + + /** Permission to list directory contents. */ + final static int LIST_DIRECTORY = fs_acl.permission_flags.PERM_LIST_DIRECTORY; + + /** Permission to create a child subdirectory. */ + final static int CREATE_SUBDIRECTORY = fs_acl.permission_flags.PERM_CREATE_SUBDIRECTORY; + + /** Permission to create a new file. */ + final static int CREATE_FILE = fs_acl.permission_flags.PERM_CREATE_FILE; + + /** Permission to delete a file or directory within a directory. */ + final static int DELETE_CHILD = fs_acl.permission_flags.PERM_DELETE_CHILD; + + /** No permission at all. */ + final static int NONE = fs_acl.permission_flags.PERM_NONE; + + /** All permission bits set. */ + final static int ALL = fs_acl.permission_flags.PERM_ALL; + + // --- public constant instances --- // + + /** Permission to read a file. */ + public final static FilesystemPermission Read = new FilesystemPermission(READ_DATA); + + /** Permission to read and write to a file. */ + public final static FilesystemPermission ReadWrite = + new FilesystemPermission(READ_DATA | WRITE_DATA); + + /** Permission to list directory contents. */ + public final static FilesystemPermission List = new FilesystemPermission(LIST_DIRECTORY); + + /** + * Permission to traverse directory (descend path where directory is an intermediate step). 
+ */ + public final static FilesystemPermission Traverse = new FilesystemPermission(TRAVERSE_DIRECTORY); + + /** Permission to list and traverse directory. */ + public final static FilesystemPermission ListTraverse = + new FilesystemPermission(LIST_DIRECTORY | TRAVERSE_DIRECTORY); + + /** Permission to list, traverse and write directory. */ + public final static FilesystemPermission ListTraverseWrite = + new FilesystemPermission(LIST_DIRECTORY | TRAVERSE_DIRECTORY | WRITE_DATA); + + /** No permission at all. */ + public final static FilesystemPermission None = + new FilesystemPermission(FilesystemPermission.NONE); + + /** Permission to write. */ + public final static FilesystemPermission Write = + new FilesystemPermission(FilesystemPermission.WRITE_DATA); + + /** + * Permission to create file + */ + public final static FilesystemPermission Create = + new FilesystemPermission(FilesystemPermission.CREATE_FILE); + + /** + * Permission to create subdirectory + */ + public final static FilesystemPermission CreateSubdirectory = + new FilesystemPermission(FilesystemPermission.CREATE_SUBDIRECTORY); + + /** + * Permission to delete file or directory + */ + public final static FilesystemPermission Delete = + new FilesystemPermission(FilesystemPermission.DELETE); + + /** + * + */ + public final static FilesystemPermission ListDirectory = + new FilesystemPermission(FilesystemPermission.LIST_DIRECTORY); + + // --- constructors --- // + + /** + * Copy constructor. Takes another instance of the + * {@link it.grid.storm.filesyste.FilesystemPermission} interface and creates an instance of this + * class granting exactly the same permissions. + */ + public FilesystemPermission(final FilesystemPermission p) { + + this.permission = (p.permission & ALL); + }; + + /** + * Constructor that takes a bitfield of permissions and creates an instance of this class granting + * exactly those permissions. 
The bitfield argument has the same format of the + * fs_acl::permission_t type, or could be constructed by bitwise-OR'ing the + * READ_DATA, WRITE_DATA, ... constants defined elsewhere in this class. + * For any bit that is set in the bitfield argument, the corresponding permission + * will be granted from this object. + * + *

+ * Example usage: + * + *

+   * p = new FilesystemPermission(READ_DATA | WRITE_DATA);
+   * // p.canReadFile() == true
+   * // p.canWriteFile() == true
+   * // p.canCreateNewFile() == false
+   * 
+ * + * @see fs_acl::permission_t + */ + public FilesystemPermission(final int bitfield) { + + this.permission = (bitfield & ALL); + }; + + /** + * Default constructor: creates an instance that denies permission on each and every operation. + * + *

+ * This constructor's intended usage is in conjunction with the permission manipulation functions + * (in derived classes): + * + *

+   * p = new FilesystemPermission().permitReadFile().permitWriteFile();
+   * // p.canReadFile() == true
+   * // p.canWriteFile() == true
+   * // p.canCreateNewFile() == false
+   * 
+ */ + protected FilesystemPermission() { + + denyAll(); + } + + // --- permission conversion functions --- // + + /** + * Return an fs_acl::permission_t bitfield representing the same permissions that this object + * encodes. + * + * @see fs_acl::permission_t + */ + public int toFsAclPermission() { + + return permission; + } + + // --- permission test methods --- // + + /** + * Return true if permission is granted to read file contents. + */ + public boolean canReadFile() { + + return 0 != (permission & READ_DATA); + } + + /** + * Return true if permission is granted to write file contents. No distinction can be + * enforced between overwriting contents and appending to the file, so no distinction is made + * here. + */ + public boolean canWriteFile() { + + return 0 != (permission & WRITE_DATA); + } + + /** + * Return true if permission is granted to list directory contents. + */ + public boolean canListDirectory() { + + return 0 != (permission & LIST_DIRECTORY); + } + + /** + * Return true if permission is granted to descend to a subdirectory. + */ + public boolean canTraverseDirectory() { + + return 0 != (permission & TRAVERSE_DIRECTORY); + } + + /** + * Return true if permission is granted to create a new subdirectory. + */ + public boolean canMakeDirectory() { + + return 0 != (permission & CREATE_SUBDIRECTORY); + } + + /** + * Return true if permission is granted to create a new file. + */ + public boolean canCreateNewFile() { + + return 0 != (permission & CREATE_FILE); + } + + /** + * Return true if permission is granted to change filesystem entry (file or + * directory) ACL. + */ + public boolean canChangeAcl() { + + return 0 != (permission & WRITE_ACL); + } + + /** + * Return true if permission is granted to delete entry (file or directory). + */ + public boolean canDelete() { + + return 0 != (permission & DELETE); + } + + /** + * Return true if all permissions that are granted by other + * FilesystemPermission instance are also granted by this instance. 
That is, test if other + * is more restrictive than the this instance. + */ + public boolean allows(final FilesystemPermission other) { + + return (other.permission == (this.permission & other.permission)); + } + + /** + * Return true if all permission bits that are set in bitfield are also set in + * this instance. That is, test if bitfield represents a more restrictive than the this + * instance. + */ + public boolean allows(final int bitfield) { + + return (bitfield == (this.permission & bitfield)); + } + + // --- permission manipulation methods --- // + + /** + * Change instance status so that all subsequent can... calls will return + * false. + * + *

+ * Returns the instance itself, so that calls to the permission manipulation functions can be + * chained: + * + *

+   * p = new FilesystemPermission();
+   * p.denyAll().permitReadFile().permitWriteFile();
+   * // p.canReadFile() == true
+   * // p.canWriteFile() == true
+   * // p.canCreateNewFile() == false
+   * 
+ */ + protected FilesystemPermission denyAll() { + + this.permission = NONE; + return this; + } + + protected FilesystemPermission denyReadFile() { - permission &= ~WRITE_ACL; - return this; - } + permission &= ~READ_DATA; + return this; + } - protected FilesystemPermission denyCreateNewFile() { + protected FilesystemPermission denyWriteFile() { - permission &= ~CREATE_FILE; - return this; - } + permission &= ~WRITE_DATA; + return this; + } - protected FilesystemPermission denyListDirectory() { + protected FilesystemPermission denyChangeAcl() { - permission &= ~LIST_DIRECTORY; - return this; - } + permission &= ~WRITE_ACL; + return this; + } - protected FilesystemPermission denyTraverseDirectory() { + protected FilesystemPermission denyCreateNewFile() { - permission &= ~TRAVERSE_DIRECTORY; - return this; - } + permission &= ~CREATE_FILE; + return this; + } - protected FilesystemPermission denyMakeDirectory() { + protected FilesystemPermission denyListDirectory() { - permission &= ~CREATE_SUBDIRECTORY; - return this; - } + permission &= ~LIST_DIRECTORY; + return this; + } - protected FilesystemPermission denyDelete() { + protected FilesystemPermission denyTraverseDirectory() { - permission &= ~DELETE; - return this; - } + permission &= ~TRAVERSE_DIRECTORY; + return this; + } - public FilesystemPermission deny(FilesystemPermission other) { + protected FilesystemPermission denyMakeDirectory() { - return new FilesystemPermission(this.permission & ~other.permission); - } + permission &= ~CREATE_SUBDIRECTORY; + return this; + } - /** - * Change instance status so that all subsequent can... calls - * will return true. Dangerous, use with caution. - * - *

- * Returns the instance itself, so that calls to the permission manipulation - * functions can be chained: - * - *

-	 * p = new FilesystemPermission();
-	 * p.permitAll().denyDelete().denyRename();
-	 * // p.canReadFile() == true
-	 * // p.canWriteFile() == true
-	 * // p.canDelete() == false
-	 * 
- */ - protected FilesystemPermission permitAll() { + protected FilesystemPermission denyDelete() { - permission = ALL; - return this; - } + permission &= ~DELETE; + return this; + } - protected FilesystemPermission permitReadFile() { + public FilesystemPermission deny(FilesystemPermission other) { - permission |= READ_DATA; - return this; - } + return new FilesystemPermission(this.permission & ~other.permission); + } - protected FilesystemPermission permitWriteFile() { + /** + * Change instance status so that all subsequent can... calls will return + * true. Dangerous, use with caution. + * + *

+ * Returns the instance itself, so that calls to the permission manipulation functions can be + * chained: + * + *

+   * p = new FilesystemPermission();
+   * p.permitAll().denyDelete().denyRename();
+   * // p.canReadFile() == true
+   * // p.canWriteFile() == true
+   * // p.canDelete() == false
+   * 
+ */ + protected FilesystemPermission permitAll() { - permission |= WRITE_DATA; - return this; - } + permission = ALL; + return this; + } - protected FilesystemPermission permitChangeAcl() { + protected FilesystemPermission permitReadFile() { - permission |= WRITE_ACL; - return this; - } + permission |= READ_DATA; + return this; + } - protected FilesystemPermission permitCreateNewFile() { + protected FilesystemPermission permitWriteFile() { - permission |= CREATE_FILE; - return this; - } + permission |= WRITE_DATA; + return this; + } - protected FilesystemPermission permitListDirectory() { + protected FilesystemPermission permitChangeAcl() { - permission |= LIST_DIRECTORY; - return this; - } + permission |= WRITE_ACL; + return this; + } - protected FilesystemPermission permitTraverseDirectory() { + protected FilesystemPermission permitCreateNewFile() { - permission |= TRAVERSE_DIRECTORY; - return this; - } + permission |= CREATE_FILE; + return this; + } - protected FilesystemPermission permitMakeDirectory() { + protected FilesystemPermission permitListDirectory() { - permission |= CREATE_SUBDIRECTORY; - return this; - } + permission |= LIST_DIRECTORY; + return this; + } - protected FilesystemPermission permitDelete() { + protected FilesystemPermission permitTraverseDirectory() { - permission |= DELETE; - return this; - } + permission |= TRAVERSE_DIRECTORY; + return this; + } - // --- internal status flags --- // + protected FilesystemPermission permitMakeDirectory() { - /** - * Method that returns an int representing This FilesystemPermission. It can - * be used as argument to FilesystemPermission constructor to get back an - * equivalent filesystemPermission Object. - */ - public int getInt() { + permission |= CREATE_SUBDIRECTORY; + return this; + } - return permission; - } + protected FilesystemPermission permitDelete() { - /** - * The permission set bitfield. 
Must match the type and representation used in - * fs_acl::permission_t: no conversion is done, the code just assumes that it - * can pass the value back and forth from Java to C++. - */ - protected int permission; + permission |= DELETE; + return this; + } - public String toString() { + // --- internal status flags --- // - return Integer.valueOf(permission).toString(); - } + /** + * Method that returns an int representing This FilesystemPermission. It can be used as argument + * to FilesystemPermission constructor to get back an equivalent filesystemPermission Object. + */ + public int getInt() { + + return permission; + } + + /** + * The permission set bitfield. Must match the type and representation used in + * fs_acl::permission_t: no conversion is done, the code just assumes that it can pass the value + * back and forth from Java to C++. + */ + protected int permission; + + public String toString() { + + return Integer.valueOf(permission).toString(); + } } diff --git a/src/main/java/it/grid/storm/filesystem/InvalidSpaceAttributesException.java b/src/main/java/it/grid/storm/filesystem/InvalidSpaceAttributesException.java index bf6c5cf9c..ea826e9d5 100644 --- a/src/main/java/it/grid/storm/filesystem/InvalidSpaceAttributesException.java +++ b/src/main/java/it/grid/storm/filesystem/InvalidSpaceAttributesException.java @@ -21,9 +21,8 @@ import it.grid.storm.common.types.SizeUnit; /** - * Class that represents an Exception thrown by the Space constructor if any of - * the supplied parameters are null, or totalSize is Empty, or guaranteedSize is - * greater than totalSize. + * Class that represents an Exception thrown by the Space constructor if any of the supplied + * parameters are null, or totalSize is Empty, or guaranteedSize is greater than totalSize. 
* * @author EGRID - ICTP Trieste * @version 1.0 @@ -31,64 +30,80 @@ */ public class InvalidSpaceAttributesException extends Exception { - private boolean nullGuarSize = false; // boolean true if garanteedSize is null - private boolean nullTotSize = false; // boolean true if totalSize is null - private boolean nullSpaFil = false; // boolean true if spaceFile is null - private boolean nullSS = false; // boolean true if SpaceSystem is null - private boolean emptyTotSize = false; // boolean true if totalSize is Empty - private boolean greater = false; // boolean true if guaranteedSize and - // totalSize are not null, not empty, and - // when interpreted as double of size BYTE - // it is _guaranteedSize_ that is GREATER - // than _totalSize_ - private double guaranteed = -1.0; // double that gets set only if (greater) is - // true, and represents _guaranteedSize_ - // expressed in bytes. - private double total = -1.0; // double that gets set only if (greater) is - // true, and represents _totalSize_ expressed in - // bytes. 
- - public InvalidSpaceAttributesException(TSizeInBytes guaranteedSize, - TSizeInBytes totalSize, LocalFile spaceFile, SpaceSystem ss) { - - nullGuarSize = guaranteedSize == null; - nullTotSize = totalSize == null; - nullSpaFil = spaceFile == null; - nullSS = ss == null; - emptyTotSize = (!nullTotSize) && totalSize.isEmpty(); - greater = (!nullGuarSize) - && (!nullTotSize) - && (!guaranteedSize.isEmpty()) - && (!totalSize.isEmpty()) - && (guaranteedSize.getSizeIn(SizeUnit.BYTES) > totalSize - .getSizeIn(SizeUnit.BYTES)); - if (greater) { - guaranteed = guaranteedSize.getSizeIn(SizeUnit.BYTES); - total = totalSize.getSizeIn(SizeUnit.BYTES); - } - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("InvalidSpaceAttributesException: nullGuaranteedSize="); - sb.append(nullGuarSize); - sb.append("; nullTotalSize="); - sb.append(nullTotSize); - sb.append("; nullSpaceFile="); - sb.append(nullSpaFil); - sb.append("; nullSpaceSystem="); - sb.append(nullSS); - sb.append("; emptyTotalSize="); - sb.append(emptyTotSize); - sb.append("; guaranteedSize greater than totalSize is "); - sb.append(greater); - if (greater) - sb.append(" with guaranteed="); - sb.append(guaranteed); - sb.append(" and total="); - sb.append(total); - return sb.toString(); - } + /** + * + */ + private static final long serialVersionUID = 1L; + + // boolean true if garanteedSize is null + private boolean nullGuarSize = false; + + // boolean true if totalSize is null + private boolean nullTotSize = false; + + // boolean true if spaceFile is null + private boolean nullSpaFil = false; + + // boolean true if SpaceSystem is null + private boolean nullSS = false; + + // boolean true if totalSize is Empty + private boolean emptyTotSize = false; + + // boolean true if guaranteedSize and totalSize are not null, not + // empty, and when interpreted as double of size BYTE + // it is _guaranteedSize_ that is GREATER + // than _totalSize_ + private boolean greater = 
false; + + // double that gets set only if (greater) is + // true, and represents _guaranteedSize_ + // expressed in bytes. + private double guaranteed = -1.0; + + // double that gets set only if (greater) is + // true, and represents _totalSize_ expressed in + // bytes. + private double total = -1.0; + + public InvalidSpaceAttributesException(TSizeInBytes guaranteedSize, TSizeInBytes totalSize, + LocalFile spaceFile, SpaceSystem ss) { + + nullGuarSize = guaranteedSize == null; + nullTotSize = totalSize == null; + nullSpaFil = spaceFile == null; + nullSS = ss == null; + emptyTotSize = (!nullTotSize) && totalSize.isEmpty(); + greater = + (!nullGuarSize) && (!nullTotSize) && (!guaranteedSize.isEmpty()) && (!totalSize.isEmpty()) + && (guaranteedSize.getSizeIn(SizeUnit.BYTES) > totalSize.getSizeIn(SizeUnit.BYTES)); + if (greater) { + guaranteed = guaranteedSize.getSizeIn(SizeUnit.BYTES); + total = totalSize.getSizeIn(SizeUnit.BYTES); + } + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("InvalidSpaceAttributesException: nullGuaranteedSize="); + sb.append(nullGuarSize); + sb.append("; nullTotalSize="); + sb.append(nullTotSize); + sb.append("; nullSpaceFile="); + sb.append(nullSpaFil); + sb.append("; nullSpaceSystem="); + sb.append(nullSS); + sb.append("; emptyTotalSize="); + sb.append(emptyTotSize); + sb.append("; guaranteedSize greater than totalSize is "); + sb.append(greater); + if (greater) + sb.append(" with guaranteed="); + sb.append(guaranteed); + sb.append(" and total="); + sb.append(total); + return sb.toString(); + } } diff --git a/src/main/java/it/grid/storm/filesystem/MtabUtil.java b/src/main/java/it/grid/storm/filesystem/MtabUtil.java index 3b350fb8c..a09fcde2a 100644 --- a/src/main/java/it/grid/storm/filesystem/MtabUtil.java +++ b/src/main/java/it/grid/storm/filesystem/MtabUtil.java @@ -28,157 +28,151 @@ */ public class MtabUtil { - private static final Logger log = 
LoggerFactory.getLogger(MtabUtil.class); + private static final Logger log = LoggerFactory.getLogger(MtabUtil.class); - private static final String MTAB_FILE_PATH = "/etc/mtab"; - - private static final int MTAB_DEVICE_INDEX = 0; - - private static final int MTAB_MOUNT_POINT_INDEX = 1; - - private static final int MTAB_FS_NAME_INDEX = 2; - - private static final int MTAB_MOUNT_OPTIONS_INDEX = 3; - - private static final int MTAB_DUMP_INDEX = 4; - - private static final int MTAB_FSC_ORDER_POSITION_INDEX = 5; - - public static String getFilePath() { - - return MTAB_FILE_PATH; - } - - /** - * @return the mtabDeviceIndex - */ - public static final int getMtabDeviceIndex() { - - return MTAB_DEVICE_INDEX; - } - - public static int getMountPointIndex() { - - return MTAB_MOUNT_POINT_INDEX; - } - - public static int getFsNameIndex() { - - return MTAB_FS_NAME_INDEX; - } - - /** - * @return the mtabMountOptionsIndex - */ - public static final int getMtabMountOptionsIndex() { - - return MTAB_MOUNT_OPTIONS_INDEX; - } - - /** - * @return the mtabDumpIndex - */ - public static final int getMtabDumpIndex() { - - return MTAB_DUMP_INDEX; - } - - /** - * @return the mtabFscOrderPositionIndex - */ - public static final int getMtabFscOrderPositionIndex() { - - return MTAB_FSC_ORDER_POSITION_INDEX; - } - - protected static boolean skipLineForMountPoints(String line) { - - return line.startsWith("#") || line.isEmpty(); - } - - public static Map getFSMountPoints() throws Exception { - - HashMap mountPointToFSMap = new HashMap(); - BufferedReader mtab = null; - try { - try { - mtab = new BufferedReader(new FileReader(getFilePath())); - } catch (FileNotFoundException e) { - log.error(e.getMessage(), e); - throw new Exception("Unable to get mount points. 
mtab file not found",e); - } - String line; - try { - while ((line = mtab.readLine()) != null) { - if (skipLineForMountPoints(line)) { - continue; - } - LinkedList elementsList = tokenizeLine(line); - if ((elementsList.size() - 1) < getMountPointIndex() - || (elementsList.size() - 1) < getFsNameIndex()) { - log.warn("FS mount point parsing error. " - + "Not enough elements found: {}. Skipping current line...", - elementsList); - } else { - mountPointToFSMap.put(elementsList.get(getMountPointIndex()), - elementsList.get(getFsNameIndex())); - } - } - } catch (IOException e) { - log.error(e.getMessage(), e); - throw new Exception( - "Unable to get mount points. Erro reading from mtab"); - } - } finally { - if (mtab != null) { - try { - mtab.close(); - } catch (IOException e) { - } - } - } - return mountPointToFSMap; - } - - public static List getRows() throws IOException { - - List rows = new ArrayList(); - BufferedReader mtab = new BufferedReader(new FileReader(getFilePath())); - String line; - while ((line = mtab.readLine()) != null) { - if (skipLineForMountPoints(line)) { - continue; - } - log.debug("mtab row from string {}", line); - MtabRow row = null; - try { - row = produceRow(line); - } catch (IllegalArgumentException e) { - log.warn("Skipping line {}. 
{}", line, e.getMessage(), e); - } - if (row != null) { - rows.add(row); - } - } - log.debug("Parsed {} mtab rows from file {}", - rows.size(), MTAB_FILE_PATH); - return rows; - } - - private static MtabRow produceRow(String line) - throws IllegalArgumentException { - - LinkedList elementsList = tokenizeLine(line); - return new MtabRow(elementsList); - } - - public static LinkedList tokenizeLine(String line) { - - String[] elementsArray = line.split(" "); - LinkedList elementsList = new LinkedList( - Arrays.asList(elementsArray)); - while (elementsList.remove("")) { - } - return elementsList; - } -} \ No newline at end of file + private static final String MTAB_FILE_PATH = "/etc/mtab"; + + private static final int MTAB_DEVICE_INDEX = 0; + + private static final int MTAB_MOUNT_POINT_INDEX = 1; + + private static final int MTAB_FS_NAME_INDEX = 2; + + private static final int MTAB_MOUNT_OPTIONS_INDEX = 3; + + private static final int MTAB_DUMP_INDEX = 4; + + private static final int MTAB_FSC_ORDER_POSITION_INDEX = 5; + + public static String getFilePath() { + + return MTAB_FILE_PATH; + } + + /** + * @return the mtabDeviceIndex + */ + public static final int getMtabDeviceIndex() { + + return MTAB_DEVICE_INDEX; + } + + public static int getMountPointIndex() { + + return MTAB_MOUNT_POINT_INDEX; + } + + public static int getFsNameIndex() { + + return MTAB_FS_NAME_INDEX; + } + + /** + * @return the mtabMountOptionsIndex + */ + public static final int getMtabMountOptionsIndex() { + + return MTAB_MOUNT_OPTIONS_INDEX; + } + + /** + * @return the mtabDumpIndex + */ + public static final int getMtabDumpIndex() { + + return MTAB_DUMP_INDEX; + } + + /** + * @return the mtabFscOrderPositionIndex + */ + public static final int getMtabFscOrderPositionIndex() { + + return MTAB_FSC_ORDER_POSITION_INDEX; + } + + protected static boolean skipLineForMountPoints(String line) { + + return line.startsWith("#") || line.isEmpty(); + } + + public static Map getFSMountPoints() throws Exception 
{ + + HashMap mountPointToFSMap = new HashMap(); + BufferedReader mtab = null; + try { + mtab = new BufferedReader(new FileReader(getFilePath())); + + String line; + while ((line = mtab.readLine()) != null) { + if (skipLineForMountPoints(line)) { + continue; + } + LinkedList elementsList = tokenizeLine(line); + if ((elementsList.size() - 1) < getMountPointIndex() + || (elementsList.size() - 1) < getFsNameIndex()) { + log.warn("FS mount point parsing error. " + + "Not enough elements found: {}. Skipping current line...", elementsList); + } else { + mountPointToFSMap.put(elementsList.get(getMountPointIndex()), + elementsList.get(getFsNameIndex())); + } + } + } catch (FileNotFoundException e) { + log.error(e.getMessage(), e); + throw new Exception("Unable to get mount points. mtab file not found", e); + } catch (IOException e) { + log.error(e.getMessage(), e); + throw new Exception("Unable to get mount points. Erro reading from mtab"); + } finally { + if (mtab != null) { + try { + mtab.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + return mountPointToFSMap; + } + + public static List getRows() throws IOException { + + List rows = new ArrayList(); + BufferedReader mtab = new BufferedReader(new FileReader(getFilePath())); + String line; + while ((line = mtab.readLine()) != null) { + if (skipLineForMountPoints(line)) { + continue; + } + log.debug("mtab row from string {}", line); + MtabRow row = null; + try { + row = produceRow(line); + } catch (IllegalArgumentException e) { + log.warn("Skipping line {}. 
{}", line, e.getMessage(), e); + } + if (row != null) { + rows.add(row); + } + } + mtab.close(); + log.debug("Parsed {} mtab rows from file {}", rows.size(), MTAB_FILE_PATH); + return rows; + } + + private static MtabRow produceRow(String line) throws IllegalArgumentException { + + LinkedList elementsList = tokenizeLine(line); + return new MtabRow(elementsList); + } + + public static LinkedList tokenizeLine(String line) { + + String[] elementsArray = line.split(" "); + LinkedList elementsList = new LinkedList(Arrays.asList(elementsArray)); + while (elementsList.remove("")) { + } + return elementsList; + } +} diff --git a/src/main/java/it/grid/storm/filesystem/NullGPFSFilesystemException.java b/src/main/java/it/grid/storm/filesystem/NullGPFSFilesystemException.java deleted file mode 100644 index 9cb67287d..000000000 --- a/src/main/java/it/grid/storm/filesystem/NullGPFSFilesystemException.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.filesystem; - -/** - * Class that represents an Exception thrown by the GPFSSpaceSystem if it is - * instantited with a null GPFS filesystem - * - * @author EGRID - ICTP Trieste - * @version 1.0 - * @date May 2006 - */ -public class NullGPFSFilesystemException extends Exception { - - public String toString() { - - return "Supplied GPFS filesystem was null!"; - } -} diff --git a/src/main/java/it/grid/storm/filesystem/ReservationException.java b/src/main/java/it/grid/storm/filesystem/ReservationException.java index 7e8f948c3..d08fccb89 100644 --- a/src/main/java/it/grid/storm/filesystem/ReservationException.java +++ b/src/main/java/it/grid/storm/filesystem/ReservationException.java @@ -26,18 +26,23 @@ */ public class ReservationException extends Exception { - private String error = ""; - - /** - * Public constructor requiring a String explaining the nature of the error. - * If the String is null, then an empty one is used instead. - */ - public ReservationException(String error) { - - if (error != null) - this.error = error; - } - + /** + * + */ + private static final long serialVersionUID = 1L; + + private String error = ""; + + /** + * Public constructor requiring a String explaining the nature of the error. If the String is + * null, then an empty one is used instead. 
+ */ + public ReservationException(String error) { + + if (error != null) + this.error = error; + } + public ReservationException(String error, Throwable cause) { super(error, cause); } @@ -46,6 +51,6 @@ public ReservationException(String error, Throwable cause) { public String toString() { - return error; - } + return error; + } } diff --git a/src/main/java/it/grid/storm/filesystem/SpaceSystemException.java b/src/main/java/it/grid/storm/filesystem/SpaceSystemException.java index b8e6b7758..9bbdcd3aa 100644 --- a/src/main/java/it/grid/storm/filesystem/SpaceSystemException.java +++ b/src/main/java/it/grid/storm/filesystem/SpaceSystemException.java @@ -18,8 +18,7 @@ package it.grid.storm.filesystem; /** - * Class that represents an Exception thrown whenever a SpaceSystem cannot be - * instantiated. + * Class that represents an Exception thrown whenever a SpaceSystem cannot be instantiated. * * @author EGRID - ICTP Trieste * @version 1.0 @@ -27,20 +26,25 @@ */ public class SpaceSystemException extends Exception { - private String explanation = ""; + /** + * + */ + private static final long serialVersionUID = 1L; - /** - * Constructor that requires a non-null String describing the problem - * encountered. If a null is supplied, then an empty String is used instead. - */ - public SpaceSystemException(String explanation) { + private String explanation = ""; - if (explanation != null) - this.explanation = explanation; - } + /** + * Constructor that requires a non-null String describing the problem encountered. If a null is + * supplied, then an empty String is used instead. 
+ */ + public SpaceSystemException(String explanation) { - public String toString() { + if (explanation != null) + this.explanation = explanation; + } - return explanation; - } + public String toString() { + + return explanation; + } } diff --git a/src/main/java/it/grid/storm/filesystem/WrongFilesystemType.java b/src/main/java/it/grid/storm/filesystem/WrongFilesystemType.java deleted file mode 100644 index 14547b078..000000000 --- a/src/main/java/it/grid/storm/filesystem/WrongFilesystemType.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * @file WrongFilesystemType.java - * @author Riccardo Murri - * - * Source code for class WrongFilesystemType - * - */ -/* - * Copyright (c) 2006, Riccardo Murri for the - * EGRID/INFN joint project StoRM. - * - * You may copy, distribute and modify this file under the terms of the - * LICENSE.txt file at the root of the StoRM backend source tree. - * - * $Id: WrongFilesystemType.java,v 1.1 2006/03/31 13:35:01 rmurri Exp $ - */ - -package it.grid.storm.filesystem; - -/** - * Thrown by genericfs subclasses ctors when the filesystem the passed pathname - * resides on, is not of a type supported by the class. - * - * Corresponds in usage to fs::wrong_filesystem_type exception thrown by C++ - * filesystem code. 
- * - * @see fs::wrong_filesystem_type - * - * @author Riccardo Murri - * @version $Revision: 1.1 $ - */ -public class WrongFilesystemType extends FilesystemError { - - public WrongFilesystemType(final String msg) { - - super(msg); - } -} diff --git a/src/main/java/it/grid/storm/filesystem/swig/test.java b/src/main/java/it/grid/storm/filesystem/swig/test.java index f027f28ef..ab47f48dd 100644 --- a/src/main/java/it/grid/storm/filesystem/swig/test.java +++ b/src/main/java/it/grid/storm/filesystem/swig/test.java @@ -3,21 +3,20 @@ import it.grid.storm.ea.StormEA; import it.grid.storm.filesystem.AclNotSupported; import it.grid.storm.filesystem.FilesystemError; -import it.grid.storm.filesystem.swig.posixfs; public class test extends posixfs { public test(String mntpath) throws AclNotSupported, FilesystemError { - super(mntpath); + super(mntpath); } protected test(long cPtr, boolean cMemoryOwn) { - super(cPtr, cMemoryOwn); + super(cPtr, cMemoryOwn); } @Override public boolean is_file_on_disk(String filename) throws it.grid.storm.filesystem.FilesystemError { - return StormEA.getOnline(filename); + return StormEA.getOnline(filename); } } diff --git a/src/main/java/it/grid/storm/griduser/CannotMapUserException.java b/src/main/java/it/grid/storm/griduser/CannotMapUserException.java index cd66ce979..57620cffe 100644 --- a/src/main/java/it/grid/storm/griduser/CannotMapUserException.java +++ b/src/main/java/it/grid/storm/griduser/CannotMapUserException.java @@ -22,24 +22,29 @@ */ public class CannotMapUserException extends GridUserException { - public CannotMapUserException() { + /** + * + */ + private static final long serialVersionUID = 1L; - super(); - } + public CannotMapUserException() { - public CannotMapUserException(String message) { + super(); + } - super(message); - } + public CannotMapUserException(String message) { - public CannotMapUserException(String message, Throwable cause) { + super(message); + } - super(message, cause); - } + public 
CannotMapUserException(String message, Throwable cause) { - public CannotMapUserException(Throwable cause) { + super(message, cause); + } - super(cause); - } + public CannotMapUserException(Throwable cause) { + + super(cause); + } } diff --git a/src/main/java/it/grid/storm/griduser/ExactDnMatch.java b/src/main/java/it/grid/storm/griduser/ExactDnMatch.java deleted file mode 100644 index f16f11881..000000000 --- a/src/main/java/it/grid/storm/griduser/ExactDnMatch.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.griduser; - -/** - * Match a proxy DN against a fixed-string pattern. - */ -public class ExactDnMatch implements DnMatch { - - /** - * Return true if fixedDn is the initial segment of - * proxyDn, and the remaining part of proxyDn - * consists only of "CN=..." fields. (That is, proxyDn may be a - * proxy DN derived from fixedDn by a proxy delegation process, - * according to RFC3820. - * - * @return true if the DNs do match. 
- */ - public boolean match(final String proxyDn, final String fixedDn) { - - assert (null != proxyDn); - assert (null != fixedDn); - - if (!proxyDn.startsWith(fixedDn)) - return false; - - if (!(proxyDn.charAt(1 + fixedDn.length()) == '/')) - /* fixedDn did not match up to DN field boundary, fail */ - return false; - - final String[] tails = proxyDn.substring(fixedDn.length()).split("/"); - for (int i = 1; i < tails.length; i++) - if (!tails[i].toUpperCase().startsWith("CN=")) - return false; - - return true; - } -} diff --git a/src/main/java/it/grid/storm/griduser/GridUserException.java b/src/main/java/it/grid/storm/griduser/GridUserException.java index 268670b23..06d126d8d 100644 --- a/src/main/java/it/grid/storm/griduser/GridUserException.java +++ b/src/main/java/it/grid/storm/griduser/GridUserException.java @@ -22,24 +22,29 @@ */ public class GridUserException extends Exception { - public GridUserException() { + /** + * + */ + private static final long serialVersionUID = 1L; - super(); - } + public GridUserException() { - public GridUserException(String message) { + super(); + } - super(message); - } + public GridUserException(String message) { - public GridUserException(String message, Throwable cause) { + super(message); + } - super(message, cause); - } + public GridUserException(String message, Throwable cause) { - public GridUserException(Throwable cause) { + super(message, cause); + } - super(cause); - } + public GridUserException(Throwable cause) { + + super(cause); + } } diff --git a/src/main/java/it/grid/storm/griduser/InvalidFqanSyntax.java b/src/main/java/it/grid/storm/griduser/InvalidFqanSyntax.java index b064a2bf6..f96e1b8e4 100644 --- a/src/main/java/it/grid/storm/griduser/InvalidFqanSyntax.java +++ b/src/main/java/it/grid/storm/griduser/InvalidFqanSyntax.java @@ -19,40 +19,44 @@ /** * Thrown when a invalid FQAN is detected by the - * {@link it.grid.storm.griduser.VomsGridUser#VomsGridUser(String,String[])} - * constructor. 
Holds and returns the offending FQAN string. + * {@link it.grid.storm.griduser.VomsGridUser#VomsGridUser(String,String[])} constructor. Holds and + * returns the offending FQAN string. */ public class InvalidFqanSyntax extends GridUserException { - /** The FQAN string that does not match the FQAN regexp */ - protected final String _offendingFqan; + /** + * + */ + private static final long serialVersionUID = 1L; - /** - * Constructor, with the offending FQAN and a separate exception message. - */ - public InvalidFqanSyntax(String offendingFqan, String message) { + /** The FQAN string that does not match the FQAN regexp */ + protected final String _offendingFqan; - super(message); + /** + * Constructor, with the offending FQAN and a separate exception message. + */ + public InvalidFqanSyntax(String offendingFqan, String message) { - assert (null == offendingFqan) : "Null string passed to InvalidFqanSyntax constructor"; + super(message); - _offendingFqan = offendingFqan; - } + assert (null == offendingFqan) : "Null string passed to InvalidFqanSyntax constructor"; - /** - * Constructor, specifying the offending FQAN only. A standard message is - * constructed. - */ - public InvalidFqanSyntax(String offendingFqan) { + _offendingFqan = offendingFqan; + } - // damn Java syntax, we cannot check offendingFqan before this... - super("Invalid FQAN: " + offendingFqan); + /** + * Constructor, specifying the offending FQAN only. A standard message is constructed. + */ + public InvalidFqanSyntax(String offendingFqan) { - _offendingFqan = offendingFqan; - } + // damn Java syntax, we cannot check offendingFqan before this... 
+ super("Invalid FQAN: " + offendingFqan); - public String getOffendingFqan() { + _offendingFqan = offendingFqan; + } - return _offendingFqan; - } + public String getOffendingFqan() { + + return _offendingFqan; + } } diff --git a/src/main/java/it/grid/storm/griduser/InvalidGridUserAttributesException.java b/src/main/java/it/grid/storm/griduser/InvalidGridUserAttributesException.java deleted file mode 100644 index 2664a0f55..000000000 --- a/src/main/java/it/grid/storm/griduser/InvalidGridUserAttributesException.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.griduser; - -import java.io.Serializable; - -public class InvalidGridUserAttributesException extends Exception implements - Serializable { -} diff --git a/src/main/java/it/grid/storm/griduser/LcmapsJNAMapper.java b/src/main/java/it/grid/storm/griduser/LcmapsJNAMapper.java index e6508a0d8..219978ae0 100644 --- a/src/main/java/it/grid/storm/griduser/LcmapsJNAMapper.java +++ b/src/main/java/it/grid/storm/griduser/LcmapsJNAMapper.java @@ -16,140 +16,121 @@ */ package it.grid.storm.griduser; -import it.grid.storm.griduser.CannotMapUserException; -import it.grid.storm.griduser.LocalUser; -import it.grid.storm.jna.lcmaps.LcmapsAccountInterface; -import it.grid.storm.jna.lcmaps.LcmapsInterface; -import it.grid.storm.jna.lcmaps.LcmapsPoolindexInterface; -import it.grid.storm.jna.lcmaps.lcmaps_account_info_t; - import org.apache.commons.lang.ArrayUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import com.sun.jna.LastErrorException; +import it.grid.storm.jna.lcmaps.LcmapsAccountInterface; +import it.grid.storm.jna.lcmaps.LcmapsInterface; +import it.grid.storm.jna.lcmaps.LcmapsPoolindexInterface; +import it.grid.storm.jna.lcmaps.lcmaps_account_info_t; + /** * @author dibenedetto_m * */ public class LcmapsJNAMapper implements MapperInterface { - private static final Object lock = new Object(); - - private static final Logger log = LoggerFactory - .getLogger(LcmapsJNAMapper.class); - - private lcmaps_account_info_t account = new lcmaps_account_info_t(); - - private final String LCMAPS_DEFAULT_LOG_FILE = "/var/log/lcmaps.log"; - - private final String LCMAPS_LOG_FILE_PATH_ENV_VARIABLE = "LCMAPS_LOG_FILE"; - - private final short LCMAPS_LOG_TYPE = 3; - - /** - * @return - */ - private String getLcmapsLogFile() { - - String lcmaps_log_file = System.getenv(LCMAPS_LOG_FILE_PATH_ENV_VARIABLE); - if (lcmaps_log_file == null) { - lcmaps_log_file = LCMAPS_DEFAULT_LOG_FILE; - } - return lcmaps_log_file.trim(); - } - - public LocalUser 
map(String dn, String[] fqans) throws CannotMapUserException { - - LocalUser mappedUser = null; - synchronized (LcmapsJNAMapper.lock) { - log.debug("Mapping user with dn = {} and fqans='{}'", - dn, ArrayUtils.toString(fqans)); - - log.debug("Initializing Lcmaps"); - String lcmapsLogFile = getLcmapsLogFile(); - log.debug("Lcmaps log file is {}", lcmapsLogFile); - - int retVal = LcmapsInterface.INSTANCE.lcmaps_init_and_logfile( - lcmapsLogFile, null, LCMAPS_LOG_TYPE); - if (retVal != 0) { - log.error("Unable to initialize lcmaps. Return value is {}" , retVal); - throw new CannotMapUserException( - "Unable to initialize lcmaps. Return value is " + retVal); - } - retVal = LcmapsAccountInterface.INSTANCE - .lcmaps_account_info_init(account); - if (retVal != 0) { - throw new CannotMapUserException( - "Unable to initialize lcmaps. Return value is " + retVal); - } - int numFqans = (fqans == null ? 0 : fqans.length); - try { - retVal = LcmapsPoolindexInterface.INSTANCE - .lcmaps_return_account_without_gsi(dn, fqans, numFqans, 0, account); - } catch (LastErrorException e) { - log.error("Unable to map user dn <{}> fqans <{}>. Error: {}. Error code: {}", - dn, ArrayUtils.toString(fqans), - e.getMessage(), - e.getErrorCode(), - e); - throw new CannotMapUserException( - "Unable to initialize lcmaps. Return value is " + retVal); - } - if (retVal != 0) { - log.error("Unable to map user dn <{}> fqans <{}>. Retval: {}", - dn, ArrayUtils.toString(fqans), - retVal); - throw new CannotMapUserException("Unable to map user dn <" + dn - + "> fqans <" + ArrayUtils.toString(fqans) + "> . Return value is " - + retVal); - } - - if (account.uid < 0) { - log.error("Negative uid returned by lcmaps: {}", account.uid); - throw new CannotMapUserException( - "Unacceptable lower than zero uid returned by Lcmaps : " - + account.uid + " . Mapping error"); - } - if (account.npgid < 0 || account.nsgid < 0) { - log.error("Negative primary or secondary gid array size. 
npgid: {} nsgid: {}", - account.npgid, account.nsgid); - - throw new CannotMapUserException( - "Negative primary or secondary gid array size returned by Lcmaps : primary = " - + account.npgid - + ", secondary = " - + account.nsgid +". Mapping error"); - } - int[] gids = null; - int numGids = account.npgid + account.nsgid; - if (numGids > account.npgid) { - gids = new int[numGids]; - int index = 0; - if (account.npgid > 0) { - for (int gid : account.pgid_list.getPointer().getIntArray(0, - account.npgid)) { - gids[index] = gid; - index++; - } - } else { - log.warn("No primary gid returned by Lcmaps! Mapping error"); - } - for (int gid : account.sgid_list.getPointer().getIntArray(0, - account.nsgid)) { - gids[index] = gid; - index++; - } - } else { - if (account.npgid > 0) { - gids = account.pgid_list.getPointer().getIntArray(0, account.npgid); - } - } - log.info("Mapped user to : ", - account.uid, - ArrayUtils.toString(gids)); - mappedUser = new LocalUser(account.uid, gids, numGids); - } - return mappedUser; - } -} \ No newline at end of file + private static final Object lock = new Object(); + + private static final Logger log = LoggerFactory.getLogger(LcmapsJNAMapper.class); + + private lcmaps_account_info_t account = new lcmaps_account_info_t(); + + private final String LCMAPS_DEFAULT_LOG_FILE = "/var/log/lcmaps.log"; + + private final String LCMAPS_LOG_FILE_PATH_ENV_VARIABLE = "LCMAPS_LOG_FILE"; + + private final short LCMAPS_LOG_TYPE = 3; + + /** + * @return + */ + private String getLcmapsLogFile() { + + String lcmaps_log_file = System.getenv(LCMAPS_LOG_FILE_PATH_ENV_VARIABLE); + if (lcmaps_log_file == null) { + lcmaps_log_file = LCMAPS_DEFAULT_LOG_FILE; + } + return lcmaps_log_file.trim(); + } + + public LocalUser map(String dn, String[] fqans) throws CannotMapUserException { + + LocalUser mappedUser = null; + synchronized (LcmapsJNAMapper.lock) { + log.debug("Mapping user with dn = {} and fqans='{}'", dn, ArrayUtils.toString(fqans)); + + 
log.debug("Initializing Lcmaps"); + String lcmapsLogFile = getLcmapsLogFile(); + log.debug("Lcmaps log file is {}", lcmapsLogFile); + + int retVal = + LcmapsInterface.INSTANCE.lcmaps_init_and_logfile(lcmapsLogFile, null, LCMAPS_LOG_TYPE); + if (retVal != 0) { + log.error("Unable to initialize lcmaps. Return value is {}", retVal); + throw new CannotMapUserException("Unable to initialize lcmaps. Return value is " + retVal); + } + retVal = LcmapsAccountInterface.INSTANCE.lcmaps_account_info_init(account); + if (retVal != 0) { + throw new CannotMapUserException("Unable to initialize lcmaps. Return value is " + retVal); + } + int numFqans = (fqans == null ? 0 : fqans.length); + try { + retVal = LcmapsPoolindexInterface.INSTANCE.lcmaps_return_account_without_gsi(dn, fqans, + numFqans, 0, account); + } catch (LastErrorException e) { + log.error("Unable to map user dn <{}> fqans <{}>. Error: {}. Error code: {}", dn, + ArrayUtils.toString(fqans), e.getMessage(), e.getErrorCode(), e); + throw new CannotMapUserException("Unable to initialize lcmaps. Return value is " + retVal); + } + if (retVal != 0) { + log.error("Unable to map user dn <{}> fqans <{}>. Retval: {}", dn, + ArrayUtils.toString(fqans), retVal); + throw new CannotMapUserException("Unable to map user dn <" + dn + "> fqans <" + + ArrayUtils.toString(fqans) + "> . Return value is " + retVal); + } + + if (account.uid < 0) { + log.error("Negative uid returned by lcmaps: {}", account.uid); + throw new CannotMapUserException("Unacceptable lower than zero uid returned by Lcmaps : " + + account.uid + " . Mapping error"); + } + if (account.npgid < 0 || account.nsgid < 0) { + log.error("Negative primary or secondary gid array size. npgid: {} nsgid: {}", + account.npgid, account.nsgid); + + throw new CannotMapUserException( + "Negative primary or secondary gid array size returned by Lcmaps : primary = " + + account.npgid + ", secondary = " + account.nsgid + ". 
Mapping error"); + } + int[] gids = null; + int numGids = account.npgid + account.nsgid; + if (numGids > account.npgid) { + gids = new int[numGids]; + int index = 0; + if (account.npgid > 0) { + for (int gid : account.pgid_list.getPointer().getIntArray(0, account.npgid)) { + gids[index] = gid; + index++; + } + } else { + log.warn("No primary gid returned by Lcmaps! Mapping error"); + } + for (int gid : account.sgid_list.getPointer().getIntArray(0, account.nsgid)) { + gids[index] = gid; + index++; + } + } else { + if (account.npgid > 0) { + gids = account.pgid_list.getPointer().getIntArray(0, account.npgid); + } + } + log.info("Mapped user to : ", account.uid, ArrayUtils.toString(gids)); + mappedUser = new LocalUser(account.uid, gids, numGids); + } + return mappedUser; + } +} diff --git a/src/main/java/it/grid/storm/griduser/MapperInterface.java b/src/main/java/it/grid/storm/griduser/MapperInterface.java index 8199585f2..924d5ee24 100644 --- a/src/main/java/it/grid/storm/griduser/MapperInterface.java +++ b/src/main/java/it/grid/storm/griduser/MapperInterface.java @@ -17,11 +17,7 @@ package it.grid.storm.griduser; -import it.grid.storm.griduser.CannotMapUserException; -import it.grid.storm.griduser.LocalUser; - interface MapperInterface { - public LocalUser map(final String dn, final String[] fqans) - throws CannotMapUserException; + public LocalUser map(final String dn, final String[] fqans) throws CannotMapUserException; } diff --git a/src/main/java/it/grid/storm/griduser/StormLcmapsJNAMapper.java b/src/main/java/it/grid/storm/griduser/StormLcmapsJNAMapper.java index bf813c729..c17ada499 100644 --- a/src/main/java/it/grid/storm/griduser/StormLcmapsJNAMapper.java +++ b/src/main/java/it/grid/storm/griduser/StormLcmapsJNAMapper.java @@ -17,74 +17,68 @@ package it.grid.storm.griduser; import java.nio.IntBuffer; -import it.grid.storm.griduser.CannotMapUserException; -import it.grid.storm.griduser.LocalUser; -import it.grid.storm.jna.lcmaps.StormLcmapsLibrary; -import 
it.grid.storm.jna.lcmaps.StormLcmapsLibrary.Errors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import it.grid.storm.jna.lcmaps.StormLcmapsLibrary; +import it.grid.storm.jna.lcmaps.StormLcmapsLibrary.Errors; + /** * @author Michele Dibenedetto * */ public class StormLcmapsJNAMapper implements MapperInterface { - private static final Logger log = LoggerFactory - .getLogger(StormLcmapsJNAMapper.class); - - private final Object lock = new Object(); - - private final String LCMAPS_DEFAULT_LOG_FILE = "/var/log/lcmaps.log"; - - private final String LCMAPS_LOG_FILE_PATH_ENV_VARIABLE = "LCMAPS_LOG_FILE"; - - private static final StormLcmapsJNAMapper instance = new StormLcmapsJNAMapper(); - - private StormLcmapsJNAMapper() { - - } - - public static StormLcmapsJNAMapper getInstance() { - - return instance; - } - - private String getLcmapsLogFile() { - - String lcmapsLogFile = System.getenv(LCMAPS_LOG_FILE_PATH_ENV_VARIABLE); - if (lcmapsLogFile == null) { - lcmapsLogFile = LCMAPS_DEFAULT_LOG_FILE; - } - return lcmapsLogFile.trim(); - } - - public LocalUser map(final String dn, final String[] fqans) - throws CannotMapUserException { - - IntBuffer userId = IntBuffer.allocate(1), groupId = IntBuffer.allocate(1); - int retVal; - synchronized (lock) { - retVal = StormLcmapsLibrary.INSTANCE.map_user(getLcmapsLogFile(), dn, - fqans, 1, userId, groupId); - } - if (retVal != 0) { - Errors error = StormLcmapsLibrary.Errors.getError(retVal); - if (!error.equals(Errors.UNKNOW_ERROR)) { - log - .error("Unable to call successfully native map_user() method. " - + "Return value is {}", error); - } else { - log - .error("Unable to call successfully native map_user() method. 
" - + "Unknown return value: {}", retVal); - } - throw new CannotMapUserException( - "LCMAPS error, cannot map user credentials to local user."); - } - LocalUser localUser = new LocalUser(userId.get(), - new int[] { groupId.get() }, 1); - return localUser; - } -} \ No newline at end of file + private static final Logger log = LoggerFactory.getLogger(StormLcmapsJNAMapper.class); + + private final Object lock = new Object(); + + private final String LCMAPS_DEFAULT_LOG_FILE = "/var/log/lcmaps.log"; + + private final String LCMAPS_LOG_FILE_PATH_ENV_VARIABLE = "LCMAPS_LOG_FILE"; + + private static final StormLcmapsJNAMapper instance = new StormLcmapsJNAMapper(); + + private StormLcmapsJNAMapper() { + + } + + public static StormLcmapsJNAMapper getInstance() { + + return instance; + } + + private String getLcmapsLogFile() { + + String lcmapsLogFile = System.getenv(LCMAPS_LOG_FILE_PATH_ENV_VARIABLE); + if (lcmapsLogFile == null) { + lcmapsLogFile = LCMAPS_DEFAULT_LOG_FILE; + } + return lcmapsLogFile.trim(); + } + + public LocalUser map(final String dn, final String[] fqans) throws CannotMapUserException { + + IntBuffer userId = IntBuffer.allocate(1), groupId = IntBuffer.allocate(1); + int retVal; + synchronized (lock) { + retVal = + StormLcmapsLibrary.INSTANCE.map_user(getLcmapsLogFile(), dn, fqans, 1, userId, groupId); + } + if (retVal != 0) { + Errors error = StormLcmapsLibrary.Errors.getError(retVal); + if (!error.equals(Errors.UNKNOW_ERROR)) { + log.error("Unable to call successfully native map_user() method. " + "Return value is {}", + error); + } else { + log.error( + "Unable to call successfully native map_user() method. 
" + "Unknown return value: {}", + retVal); + } + throw new CannotMapUserException("LCMAPS error, cannot map user credentials to local user."); + } + LocalUser localUser = new LocalUser(userId.get(), new int[] {groupId.get()}, 1); + return localUser; + } +} diff --git a/src/main/java/it/grid/storm/health/HealthDirector.java b/src/main/java/it/grid/storm/health/HealthDirector.java index c1c0c2fe8..484aac986 100644 --- a/src/main/java/it/grid/storm/health/HealthDirector.java +++ b/src/main/java/it/grid/storm/health/HealthDirector.java @@ -17,239 +17,143 @@ package it.grid.storm.health; +import org.slf4j.Logger; + import it.grid.storm.config.Configuration; import it.grid.storm.logging.StoRMLoggers; -import java.text.SimpleDateFormat; -import java.util.Date; +public class HealthDirector { -import org.slf4j.Logger; + public static final Logger LOGGER = StoRMLoggers.getHBLogger(); + public static final Logger HEARTLOG = StoRMLoggers.getHBLogger(); + private static final Logger BOOKKEEPING = StoRMLoggers.getBKLogger(); + private static final Logger PERFLOG = StoRMLoggers.getPerfLogger(); -public class HealthDirector { + private static boolean initialized = false; + private static HealthMonitor healthMonitorIstance = null; + private static boolean bookKeepingConfigured = false; + private static boolean bookKeepingEnabled = false; + + private static boolean performanceMonitorConfigured = false; + private static boolean performanceMonitorEnabled = false; + + private static long bornInstant = -1L; + + public static int timeToLiveLogEventInSec = + Configuration.getInstance().getHearthbeatPerformanceLogbookTimeInterval(); + + public static void initializeDirector() { + + bookKeepingEnabled = Configuration.getInstance().isHearthbeatBookkeepingEnabled(); + if (bookKeepingEnabled) { + bookKeepingConfigured = true; + } + + int statusPeriod = Configuration.getInstance().getHearthbeatPeriod(); + + bornInstant = System.currentTimeMillis(); + healthMonitorIstance = new HealthMonitor(1, 
statusPeriod); + + // Setting performance rate + performanceMonitorEnabled = Configuration.getInstance().isHearthbeatPerformanceMeasuringEnabled(); + if (performanceMonitorEnabled) { + int glanceTimeInterval = Configuration.getInstance().getHearthbeatPerformanceGlanceTimeInterval(); + + LOGGER.debug("----- Performance GLANCE Time Interval = {}", glanceTimeInterval); + LOGGER.debug("----- Performance LOGBOOK Time Interval = {}", timeToLiveLogEventInSec); + + healthMonitorIstance.initializePerformanceMonitor(timeToLiveLogEventInSec, + glanceTimeInterval); + + } + + initialized = true; + + } + + /** + * + * @return Logger + */ + public static Logger getLogger() { + + return LOGGER; + } + + /** + * + * @return Logger + */ + public static Logger getHealthLogger() { + + return HEARTLOG; + } + + /** + * + * @return Logger + */ + public static Logger getBookkeepingLogger() { + + return BOOKKEEPING; + } + + /** + * + * @return Logger + */ + public static Logger getPerformanceLogger() { + + return PERFLOG; + } + + public static boolean isBookKeepingConfigured() { + + return bookKeepingConfigured; + } + + public static boolean isBookKeepingEnabled() { + + return bookKeepingEnabled; + } + + public static boolean isPerformanceMonitorConfigured() { + + return performanceMonitorConfigured; + } + + public static boolean isPerformanceMonitorEnabled() { + + return performanceMonitorEnabled; + } - public static final Logger LOGGER = StoRMLoggers.getHBLogger(); - public static final Logger HEARTLOG = StoRMLoggers.getHBLogger(); - private static final Logger BOOKKEEPING = StoRMLoggers.getBKLogger(); - private static final Logger PERFLOG = StoRMLoggers.getPerfLogger(); - - private static boolean initialized = false; - private static HealthMonitor healthMonitorIstance = null; - private static boolean bookKeepingConfigured = false; - private static boolean bookKeepingEnabled = false; - - private static boolean performanceMonitorConfigured = false; - private static boolean 
performanceMonitorEnabled = false; - - private static long bornInstant = -1L; - private static String bornInstantStr = null; - - public static int timeToLiveLogEventInSec = Configuration.getInstance() - .getPerformanceLogbookTimeInterval(); - - /** - * - * @param testingMode - * boolean - */ - public static void initializeDirector(boolean testingMode) { - - // configureHealthLog(testingMode); - - bookKeepingEnabled = Configuration.getInstance().getBookKeepingEnabled(); - if (bookKeepingEnabled) { - // configureBookKeeping(testingMode); - bookKeepingConfigured = true; - } - - int statusPeriod = Configuration.getInstance().getHearthbeatPeriod(); - if (testingMode) { - statusPeriod = 5; - } - - // Record the born of StoRM instance - bornInstant = System.currentTimeMillis(); - Date date = new Date(bornInstant); - SimpleDateFormat formatter = new SimpleDateFormat("yyyy.MM.dd HH.mm.ss"); - bornInstantStr = formatter.format(date); - - healthMonitorIstance = new HealthMonitor(1, statusPeriod); // Start after 1 - // sec - - // Setting performance rate - performanceMonitorEnabled = Configuration.getInstance() - .getPerformanceMeasuring(); - if (performanceMonitorEnabled) { - // configurePerformanceMonitor(testingMode); - int glanceTimeInterval = Configuration.getInstance() - .getPerformanceGlanceTimeInterval(); - - LOGGER.debug("----- Performance GLANCE Time Interval = " - + glanceTimeInterval); - LOGGER.debug("----- Performance LOGBOOK Time Interval = " - + timeToLiveLogEventInSec); - - healthMonitorIstance.initializePerformanceMonitor( - timeToLiveLogEventInSec, glanceTimeInterval); - - } - - initialized = true; - - } - - - private static String getHealthPatternLayout() { - - /** - * @todo : Retrieve Patter Layout from Configuration .. - */ - String pattern = "[%d{ISO8601}]: %m%n"; - return pattern; - } - - /** - * @return String - */ - private static String getBookKeppingPatternLayout() { - - /** - * @todo : Retrieve Patter Layout from Configuration .. 
- */ - String pattern = "[%d{ISO8601}]: %-5p [%t] %x -%m%n"; - return pattern; - } - - /** - * @return String - */ - private static String getPerformanceMonitoringPatternLayout() { - - /** - * @todo : Retrieve Patter Layout from Configuration .. - */ - String pattern = "[%d{ISO8601}]: %m%n"; - return pattern; - } - - /** - * - * @return Logger - */ - public static Logger getLogger() { - - return LOGGER; - } - - /** - * - * @return Logger - */ - public static Logger getHealthLogger() { - - return HEARTLOG; - } - - /** - * - * @return Logger - */ - public static Logger getBookkeepingLogger() { - - return BOOKKEEPING; - } - - /** - * - * @return Logger - */ - public static Logger getPerformanceLogger() { - - return PERFLOG; - } - - public static boolean isBookKeepingConfigured() { - - return bookKeepingConfigured; - } - - public static boolean isBookKeepingEnabled() { - - return bookKeepingEnabled; - } - - public static boolean isPerformanceMonitorConfigured() { - - return performanceMonitorConfigured; - } - - public static boolean isPerformanceMonitorEnabled() { - - return performanceMonitorEnabled; - } - - /** - * - * @return Logger - */ - public static Logger getBookKeepingLogger() { - - return BOOKKEEPING; - } - - /** - * - * @return Namespace - */ - public static HealthMonitor getHealthMonitor() { - - if (!(initialized)) { - initializeDirector(false); - } - return healthMonitorIstance; - } - - /** - * - * @return Namespace - */ - public static HealthMonitor getHealthMonitor(boolean testingMode) { - - if (!(initialized)) { - initializeDirector(testingMode); - } - return healthMonitorIstance; - } - - public static long getBornInstant(boolean testingMode) { - - if (!(initialized)) { - initializeDirector(testingMode); - } - return bornInstant; - } - - public static String getBornInstantStr(boolean testingMode) { + /** + * + * @return Logger + */ + public static Logger getBookKeepingLogger() { - if (!(initialized)) { - initializeDirector(testingMode); - } - return 
bornInstantStr; - } + return BOOKKEEPING; + } - public static long getBornInstant() { + /** + * + * @return Namespace + */ + public static HealthMonitor getHealthMonitor() { - if (!(initialized)) { - initializeDirector(false); - } - return bornInstant; - } + if (!(initialized)) { + initializeDirector(); + } + return healthMonitorIstance; + } - public static String getBornInstantStr() { + public static long getBornInstant() { - if (!(initialized)) { - initializeDirector(false); - } - return bornInstantStr; - } + if (!(initialized)) { + initializeDirector(); + } + return bornInstant; + } } diff --git a/src/main/java/it/grid/storm/info/SpaceInfoManager.java b/src/main/java/it/grid/storm/info/SpaceInfoManager.java index 8b88113a2..d2f9f19ba 100644 --- a/src/main/java/it/grid/storm/info/SpaceInfoManager.java +++ b/src/main/java/it/grid/storm/info/SpaceInfoManager.java @@ -17,8 +17,6 @@ package it.grid.storm.info; -import static it.grid.storm.config.Configuration.DISKUSAGE_SERVICE_ENABLED; - import java.io.FileNotFoundException; import java.util.List; @@ -31,9 +29,8 @@ import it.grid.storm.catalogs.ReservedSpaceCatalog; import it.grid.storm.common.types.SizeUnit; import it.grid.storm.config.Configuration; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceInterface; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.persistence.exceptions.DataAccessException; import it.grid.storm.space.StorageSpaceData; import it.grid.storm.space.gpfsquota.GPFSQuotaManager; @@ -47,14 +44,14 @@ public class SpaceInfoManager { private static final SpaceInfoManager instance = new SpaceInfoManager(); private static final String USED_SPACE_INI_FILEPATH = - Configuration.getInstance().configurationDir() + "/used-space.ini".replaceAll("/+", "/"); + Configuration.getInstance().getConfigurationDir() + "/used-space.ini".replaceAll("/+", "/"); 
private static final Logger log = LoggerFactory.getLogger(SpaceInfoManager.class); // Reference to the Catalog - private final ReservedSpaceCatalog spaceCatalog = new ReservedSpaceCatalog(); - // Reference to the NamespaceDirector - private final NamespaceInterface namespace = NamespaceDirector.getNamespace(); + private final ReservedSpaceCatalog spaceCatalog = ReservedSpaceCatalog.getInstance(); + // Reference to the Namespace + private final Namespace namespace = Namespace.getInstance(); private SpaceInfoManager() {} @@ -84,14 +81,12 @@ public void initializeUsedSpace() { return; } - if (Configuration.getInstance().getDiskUsageServiceEnabled()) { + if (Configuration.getInstance().isDiskUsageServiceEnabled()) { log.info("The remaining {} storage spaces will be initialized by DiskUsage service", ssni.size()); } else { - log.warn( - "The remaining {} storage spaces WON'T be initialized with DUs. " - + "Please enable DiskUsage service by setting '{}' as true.", - ssni.size(), DISKUSAGE_SERVICE_ENABLED); + log.warn("The remaining {} storage spaces WON'T be initialized with DUs. 
" + + "Please enable DiskUsage service.", ssni.size()); } } @@ -108,8 +103,8 @@ public List retrieveSSDtoInitializeWithQuota() { // Dispatch SA to compute in two categories: Quota and DU tasks List ssdSet = Lists.newArrayList(); - List vfsSet = namespace.getVFSWithQuotaEnabled(); - for (VirtualFSInterface vfsEntry : vfsSet) { + List vfsSet = namespace.getVFSWithQuotaEnabled(); + for (VirtualFS vfsEntry : vfsSet) { String spaceTokenDesc = vfsEntry.getSpaceTokenDescription(); StorageSpaceData ssd = spaceCatalog.getStorageSpaceByAlias(spaceTokenDesc); ssdSet.add(ssd); diff --git a/src/main/java/it/grid/storm/info/du/DiskUsageService.java b/src/main/java/it/grid/storm/info/du/DiskUsageService.java index edced7c6d..f781c53da 100644 --- a/src/main/java/it/grid/storm/info/du/DiskUsageService.java +++ b/src/main/java/it/grid/storm/info/du/DiskUsageService.java @@ -11,25 +11,21 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.model.VirtualFS; public class DiskUsageService { - public static final int DEFAULT_INITIAL_DELAY = 0; - public static final int DEFAULT_TASKS_INTERVAL = 604800; - public static final boolean DEFAULT_TASKS_PARALLEL = false; - private static final Logger log = LoggerFactory.getLogger(DiskUsageService.class); - private List monitoredSAs; + private List monitoredSAs; private ScheduledExecutorService executor; private boolean running; private int delay; - private int period; + private long period; - private DiskUsageService(List vfss, ScheduledExecutorService executor, - int delay, int period) { + private DiskUsageService(List vfss, ScheduledExecutorService executor, + int delay, long period) { Preconditions.checkNotNull(vfss, "Invalid null list of Virtual FS"); Preconditions.checkNotNull(executor, "Invalid null scheduled executor service"); @@ -41,17 +37,12 @@ private DiskUsageService(List vfss, ScheduledExecutorService this.period = 
period; } - private DiskUsageService(List vfss, ScheduledExecutorService executor) { - - this(vfss, executor, DEFAULT_INITIAL_DELAY, DEFAULT_TASKS_INTERVAL); - } - public int getDelay() { return delay; } - public int getPeriod() { + public long getPeriod() { return period; } @@ -61,47 +52,28 @@ public void setDelay(int delay) { this.delay = delay; } - public void setPeriod(int period) { + public void setPeriod(long period) { this.period = period; } - public static DiskUsageService getSingleThreadScheduledService(List vfss) { - - return new DiskUsageService(vfss, Executors.newSingleThreadScheduledExecutor()); - } - - public static DiskUsageService getSingleThreadScheduledService() { - - return getSingleThreadScheduledService(Lists.newArrayList()); - } - - public static DiskUsageService getScheduledThreadPoolService(List vfss, - int poolSize) { - - return new DiskUsageService(vfss, Executors.newScheduledThreadPool(poolSize)); - } - - public static DiskUsageService getScheduledThreadPoolService(List vfss) { + public static DiskUsageService getSingleThreadScheduledService(List vfss, + int delay, long period) { - return new DiskUsageService(vfss, Executors.newScheduledThreadPool(vfss.size())); + return new DiskUsageService(vfss, Executors.newSingleThreadScheduledExecutor(), delay, period); } - public static DiskUsageService getScheduledThreadPoolService(int poolSize) { + public static DiskUsageService getScheduledThreadPoolService(List vfss, + int delay, long period) { - return getScheduledThreadPoolService(Lists.newArrayList(), poolSize); + return new DiskUsageService(vfss, Executors.newScheduledThreadPool(vfss.size()), delay, period); } - public List getMonitoredSAs() { + public List getMonitoredSAs() { return monitoredSAs; } - public void addMonitoredSA(VirtualFSInterface vfs) { - - monitoredSAs.add(vfs); - } - public synchronized int start() { if (running) { diff --git a/src/main/java/it/grid/storm/info/du/DiskUsageTask.java 
b/src/main/java/it/grid/storm/info/du/DiskUsageTask.java index 1b2e0c173..d7355c410 100644 --- a/src/main/java/it/grid/storm/info/du/DiskUsageTask.java +++ b/src/main/java/it/grid/storm/info/du/DiskUsageTask.java @@ -10,7 +10,7 @@ import it.grid.storm.catalogs.ReservedSpaceCatalog; import it.grid.storm.common.types.SizeUnit; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.persistence.exceptions.DataAccessException; import it.grid.storm.space.DUResult; import it.grid.storm.space.StorageSpaceData; @@ -21,10 +21,10 @@ public class DiskUsageTask implements Runnable { private static final Logger log = LoggerFactory.getLogger(DiskUsageTask.class); - private final ReservedSpaceCatalog spaceCatalog = new ReservedSpaceCatalog(); - private VirtualFSInterface vfs; + private final ReservedSpaceCatalog spaceCatalog = ReservedSpaceCatalog.getInstance(); + private VirtualFS vfs; - public DiskUsageTask(VirtualFSInterface vfs) { + public DiskUsageTask(VirtualFS vfs) { this.vfs = vfs; } diff --git a/src/main/java/it/grid/storm/info/model/SpaceStatusSummary.java b/src/main/java/it/grid/storm/info/model/SpaceStatusSummary.java index 9e6d825a2..6333f76ae 100644 --- a/src/main/java/it/grid/storm/info/model/SpaceStatusSummary.java +++ b/src/main/java/it/grid/storm/info/model/SpaceStatusSummary.java @@ -43,7 +43,7 @@ public class SpaceStatusSummary { // published by DIP SETTED TO ZERO BECAUSE CURRENTLY RETURN FAKE VALUES // For now do not consider the reserved space, a better management is needed - private static final ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); + private static final ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); private static final Logger log = LoggerFactory .getLogger(SpaceStatusSummary.class); diff --git a/src/main/java/it/grid/storm/info/remote/Constants.java b/src/main/java/it/grid/storm/info/remote/Constants.java deleted file mode 100644 index 
36b439116..000000000 --- a/src/main/java/it/grid/storm/info/remote/Constants.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ - -package it.grid.storm.info.remote; - -/** - * @author Michele Dibenedetto - * - */ -public class Constants { - - public static final String ENCODING_SCHEME = "UTF-8"; - - public static final String RESOURCE = "info/status"; - - // public static final String VERSION = "1.0"; - - public static final String UPDATE_OPERATION = "update"; - - public static final String TOTAL_SPACE_KEY = "total"; - - public static final String USED_SPACE_KEY = "used"; - - public static final String RESERVED_SPACE_KEY = "reserved"; - - public static final String UNAVALILABLE_SPACE_KEY = "unavailable"; - - /* - * get: /RESOURCE/alias put: - * /RESOURCE/alias/UPDATE_OPERATION?TOTAL_SPACE_KEY=total - * &USED_SPACE_KEY=used&RESERVED_SPACE_KEY - * =reserved&UNAVALILABLE_SPACE_KEY=unavailable put: - * /RESOURCE/alias/UPDATE_OPERATION - * ?USED_SPACE_KEY=used&RESERVED_SPACE_KEY=reserved - * &UNAVALILABLE_SPACE_KEY=unavailable put: - * /RESOURCE/alias/UPDATE_OPERATION?USED_SPACE_KEY - * =used&RESERVED_SPACE_KEY=reserved put: - * /RESOURCE/alias/UPDATE_OPERATION?USED_SPACE_KEY - * =used&UNAVALILABLE_SPACE_KEY=unavailable put: - * /RESOURCE/alias/UPDATE_OPERATION - * ?RESERVED_SPACE_KEY=reserved&UNAVALILABLE_SPACE_KEY=unavailable put: - * 
/RESOURCE/alias/UPDATE_OPERATION?USED_SPACE_KEY=used put: - * /RESOURCE/alias/UPDATE_OPERATION?RESERVED_SPACE_KEY=reserved put: - * /RESOURCE/alias/UPDATE_OPERATION?UNAVALILABLE_SPACE_KEY=unavailable - */ -} diff --git a/src/main/java/it/grid/storm/info/remote/resources/Ping.java b/src/main/java/it/grid/storm/info/remote/resources/Ping.java index 7dcf9b6e2..5a866257e 100644 --- a/src/main/java/it/grid/storm/info/remote/resources/Ping.java +++ b/src/main/java/it/grid/storm/info/remote/resources/Ping.java @@ -1,91 +1,79 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. 
- */ - -package it.grid.storm.info.remote.resources; - -import static javax.ws.rs.core.Response.Status.BAD_REQUEST; - -import java.io.UnsupportedEncodingException; -import java.net.URLDecoder; - -import javax.ws.rs.GET; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.Response; - -import it.grid.storm.info.remote.Constants; - -@Path("/info/ping") -public class Ping { - - // The Java method will process HTTP GET requests - @GET - // The Java method will produce content identified by the MIME Media - // type "text/plain" - @Produces("text/plain") - public String getClichedMessage() { - - // Return some cliched textual content - return "Hello World"; - } - - @GET - // The Java method will produce content identified by the MIME Media - // type "text/plain" - @Produces("text/plain") - @Path("/queryMeGet") - public String getParameterizedMessage(@QueryParam("uno") String uno, - @QueryParam("due") String due) { - - String unoDecoded, dueDecoded; - try { - unoDecoded = URLDecoder.decode(uno.trim(), Constants.ENCODING_SCHEME); - dueDecoded = URLDecoder.decode(due.trim(), Constants.ENCODING_SCHEME); - } catch (UnsupportedEncodingException e) { - System.err - .println("Unable to decode parameters. 
UnsupportedEncodingException : " - + e.getMessage()); - throw new WebApplicationException(Response.status(BAD_REQUEST) - .entity("Unable to decode parameters, unsupported encoding \'" + Constants.ENCODING_SCHEME - + "\'") - .build()); - } - return "Hello by GET my friend " + unoDecoded + " from " + dueDecoded; - } - - @PUT - // The Java method will produce content identified by the MIME Media - // type "text/plain" - @Produces("text/plain") - @Path("/queryMePut") - public String putParameterizedMessage(@QueryParam("uno") String uno, - @QueryParam("due") String due) { - - String unoDecoded, dueDecoded; - try { - unoDecoded = URLDecoder.decode(uno.trim(), Constants.ENCODING_SCHEME); - dueDecoded = URLDecoder.decode(due.trim(), Constants.ENCODING_SCHEME); - } catch (UnsupportedEncodingException e) { - System.err - .println("Unable to decode parameters. UnsupportedEncodingException : " - + e.getMessage()); - throw new WebApplicationException(Response.status(BAD_REQUEST) - .entity("Unable to decode parameters, unsupported encoding \'" + Constants.ENCODING_SCHEME - + "\'") - .build()); - } - return "Hello by PUT my friend " + unoDecoded + " from " + dueDecoded; - } - -} +/* + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by + * applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + * OF ANY KIND, either express or implied. See the License for the specific + * language governing permissions and limitations under the License. 
+ */ + +package it.grid.storm.info.remote.resources; + +import static javax.ws.rs.core.Response.Status.BAD_REQUEST; + +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; + +import javax.ws.rs.GET; +import javax.ws.rs.PUT; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.Response; + +@Path("/info/ping") +public class Ping { + + public static final String ENCODING_SCHEME = "UTF-8"; + + @GET + @Produces("text/plain") + public String getClichedMessage() { + + return "Hello World"; + } + + @GET + @Produces("text/plain") + @Path("/queryMeGet") + public String getParameterizedMessage(@QueryParam("uno") String uno, + @QueryParam("due") String due) { + + String unoDecoded, dueDecoded; + try { + unoDecoded = URLDecoder.decode(uno.trim(), ENCODING_SCHEME); + dueDecoded = URLDecoder.decode(due.trim(), ENCODING_SCHEME); + } catch (UnsupportedEncodingException e) { + System.err + .println("Unable to decode parameters. UnsupportedEncodingException : " + e.getMessage()); + throw new WebApplicationException(Response.status(BAD_REQUEST) + .entity("Unable to decode parameters, unsupported encoding \'" + ENCODING_SCHEME + "\'") + .build()); + } + return "Hello by GET my friend " + unoDecoded + " from " + dueDecoded; + } + + @PUT + @Produces("text/plain") + @Path("/queryMePut") + public String putParameterizedMessage(@QueryParam("uno") String uno, + @QueryParam("due") String due) { + + String unoDecoded, dueDecoded; + try { + unoDecoded = URLDecoder.decode(uno.trim(), ENCODING_SCHEME); + dueDecoded = URLDecoder.decode(due.trim(), ENCODING_SCHEME); + } catch (UnsupportedEncodingException e) { + System.err + .println("Unable to decode parameters. 
UnsupportedEncodingException : " + e.getMessage()); + throw new WebApplicationException(Response.status(BAD_REQUEST) + .entity("Unable to decode parameters, unsupported encoding \'" + ENCODING_SCHEME + "\'") + .build()); + } + return "Hello by PUT my friend " + unoDecoded + " from " + dueDecoded; + } + +} diff --git a/src/main/java/it/grid/storm/info/remote/resources/SpaceStatusResource.java b/src/main/java/it/grid/storm/info/remote/resources/SpaceStatusResource.java index 86d66a7f5..791433a13 100644 --- a/src/main/java/it/grid/storm/info/remote/resources/SpaceStatusResource.java +++ b/src/main/java/it/grid/storm/info/remote/resources/SpaceStatusResource.java @@ -1,65 +1,64 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. 
- */ - -package it.grid.storm.info.remote.resources; - -import static javax.ws.rs.core.Response.Status.NOT_FOUND; - -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.Response; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import it.grid.storm.info.SpaceInfoManager; -import it.grid.storm.info.model.SpaceStatusSummary; -import it.grid.storm.info.remote.Constants; -import it.grid.storm.space.gpfsquota.GPFSQuotaManager; - -@Path("/" + Constants.RESOURCE) -public class SpaceStatusResource { - - private static final Logger log = LoggerFactory.getLogger(SpaceStatusResource.class); - - @GET - @Produces("application/json") - @Path("/{alias}") - public String getStatusSummary(@PathParam("alias") String saAlias) { - - String result = ""; - log.debug("Received call getStatusSummary for SA '{}'", saAlias); - - int quotaDefined = SpaceInfoManager.getInstance().getQuotasDefined(); - if (quotaDefined > 0) { - // Update SA used space using quota defined.. - GPFSQuotaManager.INSTANCE.triggerComputeQuotas(); - } - - // Load SA values - SpaceStatusSummary saSum; - try { - saSum = SpaceStatusSummary.createFromDB(saAlias); - } catch (IllegalArgumentException e) { - log.info( - "Unable to load requested space status summary from database. IllegalArgumentException: " - + e.getMessage()); - throw new WebApplicationException(Response.status(NOT_FOUND) - .entity("Unable to load requested space status info from database") - .build()); - } - result = saSum.getJsonFormat(); - return result; - } -} +/* + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by + * applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + * OF ANY KIND, either express or implied. See the License for the specific + * language governing permissions and limitations under the License. + */ + +package it.grid.storm.info.remote.resources; + +import static javax.ws.rs.core.Response.Status.NOT_FOUND; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.Response; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.info.SpaceInfoManager; +import it.grid.storm.info.model.SpaceStatusSummary; +import it.grid.storm.space.gpfsquota.GPFSQuotaManager; + +@Path("/info/status") +public class SpaceStatusResource { + + private static final Logger log = LoggerFactory.getLogger(SpaceStatusResource.class); + + @GET + @Produces("application/json") + @Path("/{alias}") + public String getStatusSummary(@PathParam("alias") String saAlias) { + + String result = ""; + log.debug("Received call getStatusSummary for SA '{}'", saAlias); + + int quotaDefined = SpaceInfoManager.getInstance().getQuotasDefined(); + if (quotaDefined > 0) { + // Update SA used space using quota defined.. + GPFSQuotaManager.INSTANCE.triggerComputeQuotas(); + } + + // Load SA values + SpaceStatusSummary saSum; + try { + saSum = SpaceStatusSummary.createFromDB(saAlias); + } catch (IllegalArgumentException e) { + log.info( + "Unable to load requested space status summary from database. 
IllegalArgumentException: " + + e.getMessage()); + throw new WebApplicationException(Response.status(NOT_FOUND) + .entity("Unable to load requested space status info from database") + .build()); + } + result = saSum.getJsonFormat(); + return result; + } +} \ No newline at end of file diff --git a/src/main/java/it/grid/storm/jna/Errno.java b/src/main/java/it/grid/storm/jna/Errno.java deleted file mode 100644 index 64824c9b7..000000000 --- a/src/main/java/it/grid/storm/jna/Errno.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.jna; - -public class Errno { - - public static final int ENOENT = 2; /* No such file or directory */ - public static final int EEXIST = 17; /* File exists */ - public static final int ENOTDIR = 20; /* Not a directory */ - public static final int ENOSPC = 28; /* No space left on device */ - public static final int ERANGE = 34; /* Math result not representable */ - public static final int ENODATA = 61; /* No data available */ - public static final int ENOATTR = ENODATA; /* No such attribute */ - public static final int EOPNOTSUPP = 95; /* - * Operation not supported on - * transport endpoint - */ - public static final int ENOTSUP = EOPNOTSUPP; - public static final int EDQUOT = 122; /* Quota exceeded */ - -} diff --git a/src/main/java/it/grid/storm/logging/Files.java b/src/main/java/it/grid/storm/logging/Files.java index 355bf1354..d2db68461 100644 --- a/src/main/java/it/grid/storm/logging/Files.java +++ b/src/main/java/it/grid/storm/logging/Files.java @@ -23,110 +23,95 @@ public final class Files { - private Files() {} - - /** - * A convenience method for getting a file and requiring it to be a readable - * file. This is equivalent to calling - * getFile(filePath, true, true, true, false). - * - * @param filePath - * the path to the file - * - * @return the file - * - * @throws IOException - * thrown if the file is a directory, does not exist, or can not be - * read - */ - public static File getReadableFile(String filePath) throws IOException { - return getFile(filePath, true, true, true, false); - } - - /** - * Gets the file object associated with the path. 
- * - * @param filePath - * the file path - * @param requireFile - * whether the given path is required to be a file instead of a - * directory - * @param requireExistance - * whether the given file/directory must exist already - * @param requireReadable - * whether the given file/directory must be readable - * @param requireWritable - * whether the given file/directory must be writable - * - * @return the created file - * - * @throws IOException - * thrown if existance, reabability, or writability is required but - * not met - */ - public static File getFile(String filePath, boolean requireFile, - boolean requireExistance, boolean requireReadable, boolean requireWritable) - throws IOException { - - String path = Strings.safeTrimOrNullString(filePath); - if (path == null) { - throw new IOException("The file path may not be empty"); - } - - File file = new File(filePath); - - if (requireExistance && !file.exists()) { - throw new IOException("The file '" + filePath + "' does not exist."); - } - - if (requireFile && !file.isFile()) { - throw new IOException("The path '" + filePath - + "' is a directory not a file"); - } - - if (requireReadable && !file.canRead()) { - throw new IOException("The file '" + filePath + "' is not readable."); - } - - if (requireWritable && !file.canWrite()) { - throw new IOException("The file '" + filePath + "' is not writable."); - } - - return file; - } - - /** - * Reads the contents of a file in to a byte array. 
- * - * @param file - * file to read - * @return the byte contents of the file - * - * @throws IOException - * throw if there is a problem reading the file in to the byte array - */ - public static byte[] fileToByteArray(File file) throws IOException { - - long numOfBytes = file.length(); - - if (numOfBytes > Integer.MAX_VALUE) { - throw new IOException("File is to large to be read in to a byte array"); - } - - byte[] bytes = new byte[(int) numOfBytes]; - FileInputStream ins = new FileInputStream(file); - int offset = 0; - int numRead = 0; - do { - numRead = ins.read(bytes, offset, bytes.length - offset); - offset += numRead; - } while (offset < bytes.length && numRead >= 0); - - if (offset < bytes.length) { - throw new IOException("Could not completely read file " + file.getName()); - } - - ins.close(); - return bytes; - } -} \ No newline at end of file + private Files() {} + + /** + * A convenience method for getting a file and requiring it to be a readable file. This is + * equivalent to calling getFile(filePath, true, true, true, false). + * + * @param filePath the path to the file + * + * @return the file + * + * @throws IOException thrown if the file is a directory, does not exist, or can not be read + */ + public static File getReadableFile(String filePath) throws IOException { + return getFile(filePath, true, true, true, false); + } + + /** + * Gets the file object associated with the path. 
+ * + * @param filePath the file path + * @param requireFile whether the given path is required to be a file instead of a directory + * @param requireExistance whether the given file/directory must exist already + * @param requireReadable whether the given file/directory must be readable + * @param requireWritable whether the given file/directory must be writable + * + * @return the created file + * + * @throws IOException thrown if existance, reabability, or writability is required but not met + */ + public static File getFile(String filePath, boolean requireFile, boolean requireExistance, + boolean requireReadable, boolean requireWritable) throws IOException { + + String path = Strings.safeTrimOrNullString(filePath); + if (path == null) { + throw new IOException("The file path may not be empty"); + } + + File file = new File(filePath); + + if (requireExistance && !file.exists()) { + throw new IOException("The file '" + filePath + "' does not exist."); + } + + if (requireFile && !file.isFile()) { + throw new IOException("The path '" + filePath + "' is a directory not a file"); + } + + if (requireReadable && !file.canRead()) { + throw new IOException("The file '" + filePath + "' is not readable."); + } + + if (requireWritable && !file.canWrite()) { + throw new IOException("The file '" + filePath + "' is not writable."); + } + + return file; + } + + /** + * Reads the contents of a file in to a byte array. 
+ * + * @param file file to read + * @return the byte contents of the file + * + * @throws IOException throw if there is a problem reading the file in to the byte array + */ + public static byte[] fileToByteArray(File file) throws IOException { + + long numOfBytes = file.length(); + + if (numOfBytes > Integer.MAX_VALUE) { + throw new IOException("File is to large to be read in to a byte array"); + } + + byte[] bytes = new byte[(int) numOfBytes]; + FileInputStream ins = new FileInputStream(file); + int offset = 0; + int numRead = 0; + do { + numRead = ins.read(bytes, offset, bytes.length - offset); + offset += numRead; + } while (offset < bytes.length && numRead >= 0); + + ins.close(); + + if (offset < bytes.length) { + throw new IOException("Could not completely read file " + file.getName()); + } + + return bytes; + } +} diff --git a/src/main/java/it/grid/storm/metrics/InstrumentedBasicDataSource.java b/src/main/java/it/grid/storm/metrics/InstrumentedBasicDataSource.java new file mode 100644 index 000000000..195f4a1c5 --- /dev/null +++ b/src/main/java/it/grid/storm/metrics/InstrumentedBasicDataSource.java @@ -0,0 +1,136 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.metrics; + +import static com.codahale.metrics.MetricRegistry.name; + +import java.sql.Connection; +import java.sql.SQLException; + +import org.apache.commons.dbcp2.BasicDataSource; + +import com.codahale.metrics.Gauge; +import com.codahale.metrics.JmxReporter; +import com.codahale.metrics.MetricRegistry; +import com.codahale.metrics.RatioGauge; +import com.codahale.metrics.Timer; + +public class InstrumentedBasicDataSource extends BasicDataSource { + + private final Timer getConnectionTimer; + private final JmxReporter reporter; + + public InstrumentedBasicDataSource(String prefix, MetricRegistry registry) { + instrument(prefix, registry, this); + getConnectionTimer = registry.timer(name(prefix, "get-connection")); + reporter = JmxReporter.forRegistry(registry).build(); + reporter.start(); + } + + /** + * Instrument the given BasicDataSource instance with a series of timers and gauges. + * + */ + public static void instrument(String prefix, MetricRegistry registry, + final BasicDataSource datasource) { + + registry.register(name(prefix, "initial-size"), new Gauge() { + public Integer getValue() { + return datasource.getInitialSize(); + } + }); + registry.register(name(prefix, "max-idle"), new Gauge() { + public Integer getValue() { + return datasource.getMaxIdle(); + } + }); + registry.register(name(prefix, "max-open-prepared-statements"), new Gauge() { + public Integer getValue() { + return datasource.getMaxOpenPreparedStatements(); + } + }); + registry.register(name(prefix, "max-wait-millis"), new Gauge() { + public Long getValue() { + return datasource.getMaxWaitMillis(); + } + }); + registry.register(name(prefix, "min-evictable-idle-time-millis"), new Gauge() { + public Long getValue() { + return datasource.getMinEvictableIdleTimeMillis(); + } + }); + registry.register(name(prefix, "min-idle"), new Gauge() { + public Integer getValue() { + return datasource.getMinIdle(); + } + }); + registry.register(name(prefix, "num-active"), 
new Gauge() { + public Integer getValue() { + return datasource.getNumActive(); + } + }); + registry.register(name(prefix, "max-total"), new Gauge() { + public Integer getValue() { + return datasource.getMaxTotal(); + } + }); + registry.register(name(prefix, "num-idle"), new Gauge() { + public Integer getValue() { + return datasource.getNumIdle(); + } + }); + registry.register(name(prefix, "num-tests-per-eviction-run"), new Gauge() { + public Integer getValue() { + return datasource.getNumTestsPerEvictionRun(); + } + }); + registry.register(name(prefix, "time-between-eviction-runs-millis"), new Gauge() { + public Long getValue() { + return datasource.getTimeBetweenEvictionRunsMillis(); + } + }); + registry.register(name(prefix, "percent-idle"), new RatioGauge() { + @Override + protected Ratio getRatio() { + return Ratio.of(datasource.getNumIdle(), datasource.getMaxIdle()); + } + }); + registry.register(name(prefix, "percent-active"), new RatioGauge() { + @Override + protected Ratio getRatio() { + return Ratio.of(datasource.getNumActive(), datasource.getMaxTotal()); + } + }); + } + + @Override + public Connection getConnection() throws SQLException { + final Timer.Context ctx = getConnectionTimer.time(); + try { + return super.getConnection(); + } finally { + ctx.stop(); + } + } + + @Override + public synchronized void close() throws SQLException { + super.close(); + reporter.stop(); + } +} diff --git a/src/main/java/it/grid/storm/metrics/StormMetricsReporter.java b/src/main/java/it/grid/storm/metrics/StormMetricsReporter.java index e5d9d6228..01108dc94 100644 --- a/src/main/java/it/grid/storm/metrics/StormMetricsReporter.java +++ b/src/main/java/it/grid/storm/metrics/StormMetricsReporter.java @@ -1,5 +1,23 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + package it.grid.storm.metrics; +import java.util.Optional; import java.util.SortedMap; import java.util.concurrent.TimeUnit; @@ -110,6 +128,30 @@ public void report(SortedMap gauges, SortedMap c reportJettyHandlerMetrics("xmlrpc-handler", meters); reportJettyHandlerMetrics("rest-handler", meters); + + reportDbPoolMetrics("storm_db", gauges, timers); + reportDbPoolMetrics("storm_be_ISAM", gauges, timers); + } + + @SuppressWarnings("rawtypes") + private void reportDbPoolMetrics(String tpName, SortedMap gauges, + SortedMap timers) { + + String timerName = tpName + ".get-connection"; + Optional.ofNullable(timers.get(timerName)) + .ifPresent(t -> { + reportMetric(timerName, t); + }); + + int numActive = getIntValue(gauges.get(tpName + ".num-active")); + int maxActive = getIntValue(gauges.get(tpName + ".max-total")); + int numIdle = getIntValue(gauges.get(tpName + ".num-idle")); + int maxIdle = getIntValue(gauges.get(tpName + ".max-idle")); + double percentActive = getDoubleValue(gauges.get(tpName + ".percent-active")); + double percentIdle = getDoubleValue(gauges.get(tpName + ".percent-idle")); + + LOG.info("{} [active-connections={}, max-active-connections={}, percent-active={}, idle-connections={}, max-idle-connections={}. 
percent-idle={}]", + tpName, numActive, maxActive, percentActive, numIdle, maxIdle, percentIdle); } private void reportMetric(String name, Timer timer) { @@ -134,7 +176,8 @@ private void reportThreadPoolMetrics(String tpName, SortedMap gau int jobs = getIntValue(gauges.get(tpName + ".jobs")); double utilizationMax = round2dec(getDoubleValue(gauges.get(tpName + ".utilization-max"))); - LOG.info("{} [active-threads={}, idle-threads={}, jobs={}, utilization-max={}, percent-idle={}]", + LOG.info( + "{} [active-threads={}, idle-threads={}, jobs={}, utilization-max={}, percent-idle={}]", tpName, activeThreads, idleThreads, jobs, utilizationMax, percentIdle); } @@ -151,8 +194,7 @@ private void reportJettyHandlerMetrics(String handlerName, SortedMap getAllDefinedVFS() { + public List getAllDefinedVFS() { return parser.getVFSs().values().stream().collect(Collectors.toList()); } - @Override - public Map getAllDefinedVFSAsDictionary() { + public Map getAllDefinedVFSAsDictionary() { return parser.getMapVFS_Root(); } - @Override public List getAllDefinedMappingRules() { return parser.getMappingRules().values().stream().collect(Collectors.toList()); } - public List getApproachableVFS(GridUserInterface user) { + public List getApproachableVFS(GridUserInterface user) { Map apprules = Maps.newHashMap(parser.getApproachableRules()); - List approachVFS = Lists.newLinkedList(); + List approachVFS = Lists.newLinkedList(); for (ApproachableRule appRule : apprules.values()) { if (appRule.match(user)) { approachVFS.addAll(appRule.getApproachableVFS()); @@ -102,13 +134,12 @@ public List getApproachableVFS(GridUserInterface user) { return approachVFS; } - @Override - public List getApproachableByAnonymousVFS() throws NamespaceException { + public List getApproachableByAnonymousVFS() throws NamespaceException { - List anonymousVFS = Lists.newLinkedList(); - List allVFS = Lists.newLinkedList(getAllDefinedVFS()); + List anonymousVFS = Lists.newLinkedList(); + List allVFS = 
Lists.newLinkedList(getAllDefinedVFS()); - for (VirtualFSInterface vfs : allVFS) { + for (VirtualFS vfs : allVFS) { if (vfs.isApproachableByAnonymous()) { anonymousVFS.add(vfs); } @@ -117,13 +148,12 @@ public List getApproachableByAnonymousVFS() throws Namespace return anonymousVFS; } - @Override - public List getReadableByAnonymousVFS() throws NamespaceException { + public List getReadableByAnonymousVFS() throws NamespaceException { - List readableVFS = Lists.newLinkedList(); - List allVFS = Lists.newLinkedList(getAllDefinedVFS()); + List readableVFS = Lists.newLinkedList(); + List allVFS = Lists.newLinkedList(getAllDefinedVFS()); - for (VirtualFSInterface vfs : allVFS) { + for (VirtualFS vfs : allVFS) { if (vfs.isHttpWorldReadable()) { readableVFS.add(vfs); } @@ -132,14 +162,12 @@ public List getReadableByAnonymousVFS() throws NamespaceExce return readableVFS; } - @Override - public List getReadableOrApproachableByAnonymousVFS() - throws NamespaceException { + public List getReadableOrApproachableByAnonymousVFS() throws NamespaceException { - List rowVFS = Lists.newLinkedList(); - List allVFS = Lists.newLinkedList(getAllDefinedVFS()); + List rowVFS = Lists.newLinkedList(); + List allVFS = Lists.newLinkedList(getAllDefinedVFS()); - for (VirtualFSInterface vfs : allVFS) { + for (VirtualFS vfs : allVFS) { if (vfs.isHttpWorldReadable() || vfs.isApproachableByAnonymous()) { rowVFS.add(vfs); } @@ -148,7 +176,7 @@ public List getReadableOrApproachableByAnonymousVFS() return rowVFS; } - public VirtualFSInterface getDefaultVFS(GridUserInterface user) throws NamespaceException { + public VirtualFS getDefaultVFS(GridUserInterface user) throws NamespaceException { SortedSet appRules = Sets.newTreeSet(getApproachableRules(user)); @@ -170,7 +198,7 @@ public VirtualFSInterface getDefaultVFS(GridUserInterface user) throws Namespace ApproachableRule firstAppRule = appRules.first(); log.debug("Default APP_RULE is the first: {}", firstAppRule); - VirtualFSInterface vfs = 
getApproachableDefaultVFS(firstAppRule); + VirtualFS vfs = getApproachableDefaultVFS(firstAppRule); log.debug("Default VFS for Space Files : {}", vfs); return vfs; } @@ -210,7 +238,7 @@ private StoRI resolveStoRI(TSURL surl, GridUserInterface user) checkNotNull(surl, "resolveStoRI: invalid null surl"); StoRI stori = null; - List vfsApproachable = null; + List vfsApproachable = null; /* 1. compute user's approachable VFS: */ if (isAnonymous(user)) { @@ -263,7 +291,7 @@ private StoRI resolveStoRI(TSURL surl, GridUserInterface user) /* get the VFS where the resource is phisically located, if exists */ String realPath = getStoRICanonicalPath(stori); - VirtualFSInterface targetVFS = getWinnerVFS(realPath, parser.getMapVFS_Root()); + VirtualFS targetVFS = getWinnerVFS(realPath, parser.getMapVFS_Root()); if (targetVFS == null) { log.debug("Unable to find a valid VFS from path '{}'", realPath); throw new InvalidSURLException(surl, @@ -300,12 +328,12 @@ private String getStoRICanonicalPath(StoRI stori) throws NamespaceException { return realPath; } - private boolean isStoRIEnclosed(StoRI stori, VirtualFSInterface vfs) throws NamespaceException { + private boolean isStoRIEnclosed(StoRI stori, VirtualFS vfs) throws NamespaceException { return resolveVFSbyLocalFile(stori.getLocalFile()).getRootPath().equals(vfs.getRootPath()); } - private StoRI buildStoRI(VirtualFSInterface vfs, MappingRule mappingRule, TSURL surl) + private StoRI buildStoRI(VirtualFS vfs, MappingRule mappingRule, TSURL surl) throws NamespaceException { String stfnPath = surl.sfn().stfn().toString(); @@ -321,7 +349,7 @@ private boolean isAnonymous(GridUserInterface user) { return user == null; } - public VirtualFSInterface resolveVFSbySURL(TSURL surl, GridUserInterface user) + public VirtualFS resolveVFSbySURL(TSURL surl, GridUserInterface user) throws UnapprochableSurlException, InvalidSURLException, NamespaceException { if (surl == null || user == null) { @@ -330,7 +358,7 @@ public VirtualFSInterface 
resolveVFSbySURL(TSURL surl, GridUserInterface user) throw new IllegalArgumentException(errorMsg); } - List vfsApproachable = getApproachableVFS(user); + List vfsApproachable = getApproachableVFS(user); if (vfsApproachable.isEmpty()) { String errorMsg = String.format("Surl %s is not approachable by user %s", surl, user); log.debug(errorMsg); @@ -351,20 +379,20 @@ public StoRI resolveStoRIbyAbsolutePath(String absolutePath, GridUserInterface u public StoRI resolveStoRIbyAbsolutePath(String absolutePath) throws NamespaceException { - VirtualFSInterface vfs = resolveVFSbyAbsolutePath(absolutePath); + VirtualFS vfs = resolveVFSbyAbsolutePath(absolutePath); log.debug("VFS retrivied is {}", vfs.getAliasName()); log.debug("VFS instance is {}", vfs.hashCode()); return resolveStoRIbyAbsolutePath(absolutePath, vfs); } - public StoRI resolveStoRIbyAbsolutePath(String absolutePath, VirtualFSInterface vfs) + public StoRI resolveStoRIbyAbsolutePath(String absolutePath, VirtualFS vfs) throws NamespaceException { String relativePath = NamespaceUtil.extractRelativePath(vfs.getRootPath(), absolutePath); return vfs.createFile(relativePath); } - public VirtualFSInterface resolveVFSbyAbsolutePath(String absolutePath, GridUserInterface user) + public VirtualFS resolveVFSbyAbsolutePath(String absolutePath, GridUserInterface user) throws NamespaceException { /** @@ -377,21 +405,20 @@ public VirtualFSInterface resolveVFSbyAbsolutePath(String absolutePath, GridUser * Method used by srmGetSpaceMetadata * * @param absolutePath String - * @return VirtualFSInterface + * @return VirtualFS * @throws NamespaceException */ - public VirtualFSInterface resolveVFSbyRoot(String absolutePath) throws NamespaceException { + public VirtualFS resolveVFSbyRoot(String absolutePath) throws NamespaceException { return getWinnerVFS(absolutePath, parser.getMapVFS_Root()); } - public VirtualFSInterface resolveVFSbyAbsolutePath(String absolutePath) - throws NamespaceException { + public VirtualFS 
resolveVFSbyAbsolutePath(String absolutePath) throws NamespaceException { return getWinnerVFS(absolutePath, parser.getMapVFS_Root()); } - public VirtualFSInterface resolveVFSbyLocalFile(LocalFile file) throws NamespaceException { + public VirtualFS resolveVFSbyLocalFile(LocalFile file) throws NamespaceException { try { return this.resolveVFSbyAbsolutePath(file.getCanonicalPath()); @@ -405,7 +432,7 @@ public StoRI resolveStoRIbyPFN(PFN pfn) throws NamespaceException { /** * @todo Check the approachable rules */ - VirtualFSInterface vfs = resolveVFSbyPFN(pfn); + VirtualFS vfs = resolveVFSbyPFN(pfn); String vfsRoot = vfs.getRootPath(); String relativePath = NamespaceUtil.extractRelativePath(vfsRoot, pfn.getValue()); return vfs.createFile(relativePath); @@ -415,10 +442,10 @@ public StoRI resolveStoRIbyPFN(PFN pfn) throws NamespaceException { * method used by GetSpaceMetaData Executor to retrieve the VFS and Quota Parameters. * * @param pfn PFN - * @return VirtualFSInterface + * @return VirtualFS * @throws NamespaceException */ - public VirtualFSInterface resolveVFSbyPFN(PFN pfn) throws NamespaceException { + public VirtualFS resolveVFSbyPFN(PFN pfn) throws NamespaceException { return getWinnerVFS(pfn.getValue(), parser.getMapVFS_Root()); } @@ -465,7 +492,7 @@ public String makeSpaceFileURI(GridUserInterface user) throws NamespaceException log.debug("First approachable rule: {}", firstAppRule); String spacePath = getRelativePathForSpaceFile(firstAppRule); - VirtualFSInterface vfs = getApproachableDefaultVFS(firstAppRule); + VirtualFS vfs = getApproachableDefaultVFS(firstAppRule); log.debug("Default VFS for Space Files: {}", vfs); // Build the Space file path @@ -546,24 +573,34 @@ public SortedSet getApproachableRules(GridUserInterface user) return appRules; } + public Set getSupportedVOs() { + Set vos = Sets.newHashSet(); + parser.getApproachableRules().forEach((key, value) -> { + String voName = value.getSubjectRules().getVONameMatchingRule().getVOName(); + if 
(!voName.contains("*")) { + vos.add(voName); + } + }); + return vos; + } + /** * * @param appRule ApproachableRule - * @return VirtualFSInterface + * @return VirtualFS */ - public VirtualFSInterface getApproachableDefaultVFS(ApproachableRule appRule) - throws NamespaceException { + public VirtualFS getApproachableDefaultVFS(ApproachableRule appRule) throws NamespaceException { - VirtualFSInterface defaultVFS = null; + VirtualFS defaultVFS = null; String defaultVFSName = null; - List listVFS = appRule.getApproachableVFS(); + List listVFS = appRule.getApproachableVFS(); if (listVFS != null && !listVFS.isEmpty()) { log.debug(" VFS List = {}", listVFS); // Looking for the default element, signed with a '*' char at the end // Various VFS names exists. The default is '*' tagged or the first. String vfsName = null; - for (VirtualFSInterface element : listVFS) { + for (VirtualFS element : listVFS) { if (element.getAliasName().endsWith("*")) { vfsName = element.getAliasName().substring(0, element.getAliasName().length() - 1); break; @@ -590,10 +627,9 @@ private static boolean matchSubject(ApproachableRule approachableRule, GridUserI return result; } - public VirtualFSInterface resolveVFSbySpaceToken(TSpaceToken spaceToken) - throws NamespaceException { + public VirtualFS resolveVFSbySpaceToken(TSpaceToken spaceToken) throws NamespaceException { - Optional vfs = + Optional vfs = getAllDefinedVFS().stream().filter(v -> spaceToken.equals(v.getSpaceToken())).findFirst(); if (vfs.isPresent()) { return vfs.get(); @@ -606,12 +642,12 @@ public boolean isStfnFittingSomewhere(String surlString, GridUserInterface user) throws NamespaceException { List stfnRoots = Lists.newArrayList(); - List listVFS = getApproachableVFS(user); + List listVFS = getApproachableVFS(user); Map rules = Maps.newHashMap(parser.getMappingRules()); // Retrieve the list of stfnRoot approachable for (Map.Entry rule : rules.entrySet()) { - VirtualFSInterface mappedFS = rule.getValue().getMappedFS(); + VirtualFS 
mappedFS = rule.getValue().getMappedFS(); if (listVFS.contains(mappedFS)) { // retrieve stfnRoot stfnRoots.add(rule.getValue().getStFNRoot()); } @@ -635,17 +671,17 @@ public boolean isStfnFittingSomewhere(String surlString, GridUserInterface user) return false; } - public List getVFSWithQuotaEnabled() { + public List getVFSWithQuotaEnabled() { - List vfsSet = getAllDefinedVFS(); + List vfsSet = getAllDefinedVFS(); log.debug("Found '{}' VFS defined in Namespace.xml", vfsSet.size()); - List vfsSetQuota = + List vfsSetQuota = vfsSet.stream().filter(vfs -> isGPFSQuotaEnabled(vfs)).collect(Collectors.toList()); log.debug("Number of VFS with Quota enabled: {}", vfsSetQuota.size()); return vfsSetQuota; } - private boolean isGPFSQuotaEnabled(VirtualFSInterface vfs) { + private boolean isGPFSQuotaEnabled(VirtualFS vfs) { Preconditions.checkNotNull(vfs, "vfsItem must not be null!"); @@ -672,4 +708,14 @@ private boolean isGPFSQuotaEnabled(VirtualFSInterface vfs) { } + public Set getManagedEndpoints(Protocol protocol) { + + Set endpoints = Sets.newHashSet(); + this.getAllDefinedVFS().forEach(vfs -> { + vfs.getCapabilities().getManagedProtocolByScheme(protocol).forEach(tp -> { + endpoints.add(tp.getAuthority()); + }); + }); + return endpoints; + } } diff --git a/src/main/java/it/grid/storm/namespace/NamespaceDirector.java b/src/main/java/it/grid/storm/namespace/NamespaceDirector.java deleted file mode 100644 index 4332bbfd2..000000000 --- a/src/main/java/it/grid/storm/namespace/NamespaceDirector.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.namespace; - -import it.grid.storm.config.Configuration; -import it.grid.storm.namespace.config.NamespaceLoader; -import it.grid.storm.namespace.config.NamespaceParser; -import it.grid.storm.namespace.config.xml.XMLNamespaceLoader; -import it.grid.storm.namespace.config.xml.XMLNamespaceParser; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class NamespaceDirector { - - private static final Logger log = LoggerFactory - .getLogger(NamespaceDirector.class);; - private static NamespaceInterface namespaceIstance = null; - - private static NamespaceLoader loader; - private static NamespaceParser parser; - - private static boolean initialized = false; - - private NamespaceDirector() {} - - public static void initializeDirector() { - - log.info("NAMESPACE : Initializing ..."); - Configuration config = Configuration.getInstance(); - - log.info(" +++++++++++++++++++++++ "); - log.info(" Production Mode "); - log.info(" +++++++++++++++++++++++ "); - - String configurationPATH = config.namespaceConfigPath(); - String namespaceConfigFileName = config.getNamespaceConfigFilename(); - int refreshInSeconds = config.getNamespaceConfigRefreshRateInSeconds(); - loader = new XMLNamespaceLoader(configurationPATH, namespaceConfigFileName, refreshInSeconds); - - // Check the validity of namespace. 
- if (loader instanceof XMLNamespaceLoader) { - XMLNamespaceLoader xmlLoader = (XMLNamespaceLoader) loader; - if (!(xmlLoader.schemaValidity)) { - // Error into the validity ckeck of namespace - log.error("Namespace configuration is not conformant with namespae grammar."); - log.error("Please validate namespace configuration file."); - System.exit(0); - } - } - - log.debug("Namespace Configuration PATH : {}" , configurationPATH); - log.debug("Namespace Configuration FILENAME : {}" , namespaceConfigFileName); - log.debug("Namespace Configuration GLANCE RATE : {}" , refreshInSeconds); - - parser = new XMLNamespaceParser(loader); - namespaceIstance = new Namespace(parser); - - log.debug("NAMESPACE INITIALIZATION : ... done!"); - initialized = true; - - } - - /** - * - * @return Namespace - */ - public static NamespaceInterface getNamespace() { - - if (!(initialized)) { - initializeDirector(); - } - return namespaceIstance; - } - - /** - * - * @return Namespace - */ - public static NamespaceParser getNamespaceParser() { - - if (!(initialized)) { - initializeDirector(); - } - return parser; - } - - /** - * - * @return Namespace - */ - public static NamespaceLoader getNamespaceLoader() { - - if (!(initialized)) { - initializeDirector(); - } - return loader; - } - - public static Logger getLogger() { - - return log; - } - -} diff --git a/src/main/java/it/grid/storm/namespace/NamespaceException.java b/src/main/java/it/grid/storm/namespace/NamespaceException.java index 88e8ec7af..a99f8feda 100644 --- a/src/main/java/it/grid/storm/namespace/NamespaceException.java +++ b/src/main/java/it/grid/storm/namespace/NamespaceException.java @@ -19,23 +19,28 @@ public class NamespaceException extends Exception { - public NamespaceException() { + /** + * + */ + private static final long serialVersionUID = 1L; - super(); - } + public NamespaceException() { - public NamespaceException(String message) { + super(); + } - super(message); - } + public NamespaceException(String message) { - 
public NamespaceException(String message, Throwable cause) { + super(message); + } - super(message, cause); - } + public NamespaceException(String message, Throwable cause) { - public NamespaceException(Throwable cause) { + super(message, cause); + } - super(cause); - } + public NamespaceException(Throwable cause) { + + super(cause); + } } diff --git a/src/main/java/it/grid/storm/namespace/NamespaceInterface.java b/src/main/java/it/grid/storm/namespace/NamespaceInterface.java deleted file mode 100644 index 68dc9c2fc..000000000 --- a/src/main/java/it/grid/storm/namespace/NamespaceInterface.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.namespace; - -import java.util.List; -import java.util.Map; - -import it.grid.storm.common.types.PFN; -import it.grid.storm.filesystem.Space; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.namespace.model.MappingRule; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; - -public interface NamespaceInterface { - - /** - * getAllDefinedVFS - * - * @return List : Return a List of VirtualFS containing all the instances - * defined within Namespace - * @throws NamespaceException - */ - public List getAllDefinedVFS(); - - /** - * getAllDefinedVFSAsDictionary - * - * @return Map : Return a Map of all VirtualFS defined within - * Namespace, indexed by their root-paths - * @throws NamespaceException - */ - public Map getAllDefinedVFSAsDictionary(); - - /** - * getVFSWithQuotaEnabled - * - * @return Collection: Return a collection of VirtualFS with fs type GPFS and - * quota enabled - * @throws NamespaceException - */ - public List getVFSWithQuotaEnabled(); - - /** - * getAllDefinedMappingRules - * - * @return List : Return a List of mapping rules containing all the instances defined - * within Namespace - * @throws NamespaceException - */ - public List getAllDefinedMappingRules(); - - /** - * - * - * - * @param user GridUserInterface : Represents the principal - * @return List : Return a List of VirtualFS instances - * @throws NamespaceException : Occur when - */ - public List getApproachableVFS(GridUserInterface user) - throws NamespaceException; - - /** - * - * @return List : Return a List of readable and writable by anonymous users VirtualFS instances - * @throws NamespaceException - */ - public List getApproachableByAnonymousVFS() throws NamespaceException; - - /** - * - * @return List : Return a List of readable by anonymous users VirtualFS instances - * @throws NamespaceException - */ - public List getReadableByAnonymousVFS() throws 
NamespaceException; - - /** - * - * @return List : Return a List of readable or writable by anonymous users VirtualFS instances - * @throws NamespaceException - */ - public List getReadableOrApproachableByAnonymousVFS() - throws NamespaceException; - - /** - * - * @param user GridUserInterface - * @return VirtualFSInterface - * @throws NamespaceException - */ - public VirtualFSInterface getDefaultVFS(GridUserInterface user) throws NamespaceException; - - /** - * - * @param storageResource StoRI - * @param gridUser GridUserInterface - * @return boolean - * @throws NamespaceException - */ - public boolean isApproachable(StoRI storageResource, GridUserInterface gridUser) - throws NamespaceException; - - /** - * - * @param surl TSURL - * @param user GridUserInterface - * @return StoRI - * @throws NamespaceException - * @throws UnapprochableSurlException - * @throws InvalidSURLException - */ - public StoRI resolveStoRIbySURL(TSURL surl, GridUserInterface user) - throws UnapprochableSurlException, NamespaceException, InvalidSURLException; - - /** - * - * @param surl TSURL - * @return StoRI - * @throws IllegalArgumentException - * @throws NamespaceException - * @throws InvalidSURLException - */ - public StoRI resolveStoRIbySURL(TSURL surl) - throws UnapprochableSurlException, NamespaceException, InvalidSURLException; - - /** - * - * @param absolutePath String - * @param user GridUserInterface - * @return StoRI - * @throws NamespaceException - */ - public StoRI resolveStoRIbyAbsolutePath(String absolutePath, GridUserInterface user) - throws NamespaceException; - - /** - * - * @param absolutePath String - * @param vfs VirtualFSInterface - * @return StoRI - * @throws NamespaceException - */ - public StoRI resolveStoRIbyAbsolutePath(String absolutePath, VirtualFSInterface vfs) - throws NamespaceException; - - /** - * - * @param absolutePath String - * @return StoRI - * @throws NamespaceException - */ - public StoRI resolveStoRIbyAbsolutePath(String absolutePath) throws 
NamespaceException; - - /** - * - * @param absolutePath String - * @param user GridUserInterface - * @return VirtualFSInterface - * @throws NamespaceException - */ - public VirtualFSInterface resolveVFSbyAbsolutePath(String absolutePath, GridUserInterface user) - throws NamespaceException; - - /** - * - * @param absolutePath String - * @return VirtualFSInterface - * @throws NamespaceException - */ - public VirtualFSInterface resolveVFSbyAbsolutePath(String absolutePath) throws NamespaceException; - - /** - * - * @param pfn PFN - * @return StoRI - * @throws NamespaceException - */ - public StoRI resolveStoRIbyPFN(PFN pfn) throws NamespaceException; - - /** - * - * @param file LocalFile - * @return VirtualFSInterface - * @throws NamespaceException - */ - public VirtualFSInterface resolveVFSbyLocalFile(it.grid.storm.filesystem.LocalFile file) - throws NamespaceException; - - /** - * - * @param pfn PFN - * @return VirtualFSInterface - * @throws NamespaceException - */ - public VirtualFSInterface resolveVFSbyPFN(PFN pfn) throws NamespaceException; - - /** - * - * @param user GridUserInterface - * @return StoRI - * @throws NamespaceException - */ - public StoRI getDefaultSpaceFileForUser(GridUserInterface user) throws NamespaceException; - - /** - * Method that retrieves a previously reserved Space as identified by the SpaceToken, for the - * given new size. If null or Empty TSizeInBytes are supplied, a Space object built off deafult - * values is returned instead. 
- * - * - * @param totSize TSizeInBytes - * @param token TSpaceToken - * @return Space - */ - - public Space retrieveSpaceByToken(TSizeInBytes totSize, TSpaceToken token); - - /** - * - * @param user GridUserInterface - * @return String - * @throws NamespaceException - */ - public String makeSpaceFileURI(GridUserInterface user) throws NamespaceException; - - /** - * @param fileName - * @return - * @throws IllegalArgumentException - */ - public boolean isSpaceFile(String fileName); - - public String getNamespaceVersion() throws NamespaceException; - - /** - * @param absolutePath - * @return - * @throws NamespaceException - */ - public VirtualFSInterface resolveVFSbyRoot(String absolutePath) throws NamespaceException; - - /** - * @param spaceToken - * @return - * @throws NamespaceException - */ - public VirtualFSInterface resolveVFSbySpaceToken(TSpaceToken spaceToken) - throws NamespaceException; - -} diff --git a/src/main/java/it/grid/storm/namespace/StoRI.java b/src/main/java/it/grid/storm/namespace/StoRI.java index 0443c2674..de9c3f1c5 100644 --- a/src/main/java/it/grid/storm/namespace/StoRI.java +++ b/src/main/java/it/grid/storm/namespace/StoRI.java @@ -17,14 +17,45 @@ package it.grid.storm.namespace; +import static org.apache.commons.lang.StringUtils.join; + +import java.io.File; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Collection; +import java.util.Date; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.balancer.BalancingStrategy; +import it.grid.storm.balancer.BalancingStrategyException; +import it.grid.storm.balancer.Node; +import it.grid.storm.catalogs.VolatileAndJiTCatalog; +import it.grid.storm.common.types.InvalidPFNAttributeException; +import it.grid.storm.common.types.InvalidStFNAttributeException; import it.grid.storm.common.types.PFN; import it.grid.storm.common.types.StFN; import it.grid.storm.common.types.TURLPrefix; 
+import it.grid.storm.config.Configuration; +import it.grid.storm.filesystem.FilesystemIF; import it.grid.storm.filesystem.LocalFile; import it.grid.storm.filesystem.ReservationException; import it.grid.storm.filesystem.Space; +import it.grid.storm.filesystem.SpaceSystem; +import it.grid.storm.namespace.model.Authority; +import it.grid.storm.namespace.model.Capability; import it.grid.storm.namespace.model.MappingRule; +import it.grid.storm.namespace.model.PathCreator; +import it.grid.storm.namespace.model.Protocol; import it.grid.storm.namespace.model.StoRIType; +import it.grid.storm.namespace.model.TransportProtocol; +import it.grid.storm.namespace.model.VirtualFS; +import it.grid.storm.namespace.naming.NamespaceUtil; +import it.grid.storm.namespace.naming.NamingConst; +import it.grid.storm.namespace.naming.SURL; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TSURL; @@ -32,74 +63,631 @@ import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TTURL; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; - -public interface StoRI { - - public void setStoRIType(StoRIType type); - - public TTURL getTURL(TURLPrefix prefixOfAcceptedTransferProtocols) - throws IllegalArgumentException, InvalidGetTURLProtocolException, - TURLBuildingException; - - public TSURL getSURL(); - - public PFN getPFN(); - - public StFN getStFN(); - - public StFN getStFNFromMappingRule(); - - public String getRelativePath(); - - public String getRelativeStFN(); - - public TLifeTimeInSeconds getFileLifeTime(); - - public Date getFileStartTime(); - - public StoRIType getStoRIType(); - - public Space getSpace(); - - public void setSpace(Space space); - - public LocalFile getLocalFile(); - - public VirtualFSInterface getVirtualFileSystem(); - - public String getStFNRoot(); - - public String getStFNPath(); - - public String getFilename(); - - public void setStFNRoot(String stfnRoot); - 
- public void setMappingRule(MappingRule winnerRule); - - public MappingRule getMappingRule(); - - public ArrayList getChildren(TDirOption dirOption) - throws InvalidDescendantsEmptyRequestException, - InvalidDescendantsPathRequestException, - InvalidDescendantsFileRequestException; - - public String getAbsolutePath(); - - public boolean hasJustInTimeACLs(); - - public List getParents(); - - public void allotSpaceForFile(TSizeInBytes totSize) - throws ReservationException; - - public void allotSpaceByToken(TSpaceToken token) throws ReservationException, - ExpiredSpaceTokenException; - - public void allotSpaceByToken(TSpaceToken token, TSizeInBytes totSize) - throws ReservationException, ExpiredSpaceTokenException; +public class StoRI { + + private Logger log = LoggerFactory.getLogger(StoRI.class); + + private final Configuration config = Configuration.getInstance(); + + private TSURL surl; + private PFN pfn; + private Capability.ACLMode aclMode = Capability.ACLMode.UNDEF; + private TLifeTimeInSeconds lifetime = null; + private Date startTime = null; + private LocalFile localFile = null; + private Space space; + + private VirtualFS vfs; + private FilesystemIF fs; + private SpaceSystem spaceDriver; + private StoRIType type; + private Capability capability; + + // Elements of Name of StoRI + private String stfn; + private String vfsRoot; + private String relativeStFN; + private String relativePath; + private String fileName; + private String stfnPath; + private String stfnRoot; + + private MappingRule winnerRule; + + // Boolean status for full detailed metadata + private boolean volatileInformationAreSet = false; + + public StoRI(VirtualFS vfs, MappingRule winnerRule, String relativeStFN, + StoRIType type) { + + if (vfs != null) { + this.vfs = vfs; + capability = (Capability) vfs.getCapabilities(); + } else { + log.error("StoRI built without VFS!"); + } + + if (winnerRule != null) { + stfnRoot = winnerRule.getStFNRoot(); + stfn = stfnRoot + NamingConst.SEPARATOR + 
relativeStFN; + + vfsRoot = vfs.getRootPath(); + + this.relativeStFN = relativeStFN; + + stfnPath = NamespaceUtil.getStFNPath(stfn); + + relativePath = NamespaceUtil.consumeFileName(relativeStFN); + + if (relativePath != null) { + if (relativePath.startsWith(NamingConst.SEPARATOR)) { + relativePath = relativePath.substring(1); + } + } else { + relativePath = "/"; + } + + fileName = NamespaceUtil.getFileName(relativeStFN); + log.debug("StFN Filename : {} [StFN = '{}']", fileName, relativeStFN); + + if (type == null) { + if (relativeStFN.endsWith(NamingConst.SEPARATOR)) { + type = StoRIType.FOLDER; + } else { + type = StoRIType.UNKNOWN; + } + } else { + this.type = type; + } + + } else { + log.warn("StoRI built without mapping rule"); + } + } + + public StoRI(VirtualFS vfs, String stfnStr, TLifeTimeInSeconds lifetime, + StoRIType type) { + + this.vfs = vfs; + this.capability = (Capability) vfs.getCapabilities(); + // Relative path has to be a path in a relative form! (without "/" at + // begins) + if (relativePath != null) { + if (relativePath.startsWith(NamingConst.SEPARATOR)) { + this.relativePath = relativePath.substring(1); + } + } else { + this.relativePath = "/"; + } + + this.lifetime = lifetime; + + if (type == null) { + this.type = StoRIType.UNKNOWN; + } else { + this.type = type; + } + + this.stfnRoot = null; + + this.fileName = NamespaceUtil.getFileName(stfnStr); + log.debug("StFN Filename : {} [StFN = '{}']", fileName, stfnStr); + + this.stfnPath = NamespaceUtil.getStFNPath(stfnStr); + log.debug("StFN StFNPath : {} [StFN = '{}']", stfnPath, stfnStr); + + } + + public void allotSpaceByToken(TSpaceToken token) + throws ReservationException, ExpiredSpaceTokenException { + + // Retrieve SpaceSystem Driver + if (spaceDriver == null) { + try { + this.spaceDriver = vfs.getSpaceSystemDriverInstance(); + } catch (NamespaceException e) { + log.error(e.getMessage(), e); + throw new ReservationException("Error while retrieving Space System Driver for VFS", e); + } + } 
+ + try { + vfs.useAllSpaceForFile(token, this); + } catch (NamespaceException e) { + log.error("Error using space token {} for file {}: {}", token, fileName, e.getMessage(), e); + throw new ReservationException(e.getMessage(), e); + } + + } + + public void allotSpaceByToken(TSpaceToken token, TSizeInBytes totSize) + throws ReservationException, ExpiredSpaceTokenException { + + if (spaceDriver == null) { + try { + this.spaceDriver = vfs.getSpaceSystemDriverInstance(); + } catch (NamespaceException e) { + log.error(e.getMessage(), e); + throw new ReservationException("Error while retrieving Space System Driver for VFS", e); + } + } + + try { + vfs.useSpaceForFile(token, this, totSize); + } catch (NamespaceException e) { + log.error("Error using space token {} for file {}: {}", token, fileName, e.getMessage(), e); + throw new ReservationException(e.getMessage(), e); + } + + } + + public void allotSpaceForFile(TSizeInBytes totSize) throws ReservationException { + + if (spaceDriver == null) { + try { + this.spaceDriver = vfs.getSpaceSystemDriverInstance(); + } catch (NamespaceException e) { + log.error("Error while retrieving Space System Driver for VFS {}", e.getMessage(), e); + + throw new ReservationException("Error while retrieving Space System Driver for VFS", e); + } + } + + try { + vfs.makeSilhouetteForFile(this, totSize); + } catch (NamespaceException e) { + log.error(e.getMessage(), e); + throw new ReservationException( + "Error while constructing 'Space Silhouette' for " + this.fileName, e); + } + + log.debug("Space built. 
Space " + this.getSpace().getSpaceFile().getPath()); + this.getSpace().allot(); + } + + public String getAbsolutePath() { + return vfs.getRootPath() + NamingConst.SEPARATOR + relativeStFN; + } + + public TLifeTimeInSeconds getFileLifeTime() { + if (!(volatileInformationAreSet)) { + setVolatileInformation(); + } + return lifetime; + } + + public String getFilename() { + + return this.fileName; + } + + public Date getFileStartTime() { + + if (!(volatileInformationAreSet)) { + setVolatileInformation(); + } + return startTime; + } + + public ArrayList getChildren(TDirOption dirOption) + throws InvalidDescendantsEmptyRequestException, InvalidDescendantsPathRequestException, + InvalidDescendantsFileRequestException { + + ArrayList stoRIList = new ArrayList(); + File fileHandle = new File(getAbsolutePath()); + + if (!fileHandle.isDirectory()) { + if (fileHandle.isFile()) { + log.error("SURL represents a File, not a Directory!"); + throw new InvalidDescendantsFileRequestException(fileHandle); + } else { + log.warn("SURL does not exists!"); + throw new InvalidDescendantsPathRequestException(fileHandle); + } + } else { // SURL point to an existent directory. 
+ // Create ArrayList containing all Valid fileName path found in + // PFN of StoRI's SURL + PathCreator pCreator = new PathCreator(fileHandle, dirOption.isAllLevelRecursive(), 1); + Collection pathList = pCreator.generateChildren(); + if (pathList.size() == 0) { + log.debug("SURL point to an EMPTY DIRECTORY"); + throw new InvalidDescendantsEmptyRequestException(fileHandle, pathList); + } else { // Creation of StoRI LIST + Namespace namespace = Namespace.getInstance(); + for (String childPath : pathList) { + log.debug(":Creation of new StoRI with path: {}", childPath); + try { + + StoRI childStorI = namespace.resolveStoRIbyAbsolutePath(childPath, vfs); + childStorI.setMappingRule(getMappingRule()); + + stoRIList.add(childStorI); + } catch (NamespaceException ex) { + log.error("Error occurred while resolving StoRI by absolute path", ex); + } + } + } + } + return stoRIList; + } + + public LocalFile getLocalFile() { + + if (localFile == null) { + try { + fs = vfs.getFilesystem(); + } catch (NamespaceException ex) { + log.error("Error while retrieving FS driver ", ex); + } + localFile = new LocalFile(getAbsolutePath(), fs); + } + return localFile; + } + + public MappingRule getMappingRule() { + return this.winnerRule; + } + + public List getParents() { + + StoRI createdStoRI = null; + ArrayList parentList = new ArrayList(); + String consumeElements = this.relativePath; + String consumed; + boolean lastElements = false; + + do { + createdStoRI = new StoRI(this.vfs, this.winnerRule, consumeElements, StoRIType.FOLDER); + parentList.add(createdStoRI); + consumed = NamespaceUtil.consumeElement(consumeElements); + if (consumed.equals(consumeElements)) { + lastElements = true; + } else { + consumeElements = consumed; + } + } while ((!lastElements)); + + return parentList; + } + + public PFN getPFN() { + + if (pfn == null) { + try { + this.pfn = PFN.make(getAbsolutePath()); + } catch (InvalidPFNAttributeException e) { + log.error(e.getMessage(), e); + } + } + return this.pfn; 
+ } + + public String getRelativePath() { + + return this.relativePath; + } + + public String getRelativeStFN() { + + return this.relativeStFN; + } + + public Space getSpace() { + + if (space == null) { + log.error("No space bound with this StoRI!"); + return null; + } + return this.space; + } + + public StFN getStFN() { + + StFN stfn = null; + if (this.surl == null) { + getSURL(); + } + stfn = surl.sfn().stfn(); + return stfn; + } + + public String getStFNPath() { + + return this.stfnPath; + } + + public String getStFNRoot() { + + return this.stfnRoot; + } + + public StoRIType getStoRIType() { + + return this.type; + } + + public TSURL getSURL() { + + /** + * The String passed to TSURL.makeFromString MUST contains a valid TSURL in string format, not + * only relativePath. + */ + if (this.surl == null) { + try { + this.surl = TSURL.makeFromStringValidate(buildSURLString()); + } catch (Throwable e) { + log.error("Unable to build the SURL with relative path: {}. {}", relativePath, + e.getMessage(), e); + } + } + return surl; + } + + public TTURL getTURL(TURLPrefix desiredProtocols) + throws IllegalArgumentException, InvalidGetTURLProtocolException, TURLBuildingException { + + TTURL resultTURL = null; + + if (desiredProtocols == null || desiredProtocols.size() == 0) { + log.error(" request with NULL or empty prefixOfAcceptedTransferProtocol!"); + throw new IllegalArgumentException( + "unable to build the TTURL, invalid arguments: desiredProtocols=" + desiredProtocols); + } else { + + // Within the request there are some protocol preferences + // Calculate the intersection between Desired Protocols and Available + // Protocols + List desiredP = new ArrayList<>(desiredProtocols.getDesiredProtocols()); + List availableP = new ArrayList<>(capability.getAllManagedProtocols()); + desiredP.retainAll(availableP); + + if (desiredP.isEmpty()) { + String msg = + String.format("None of [%s] protocols matches the available " + "protocols [%s]", + join(desiredP, ','), 
join(availableP, ',')); + log.error(msg); + throw new InvalidGetTURLProtocolException(msg); + + } else { + + log.debug("Protocol matching.. Intersection size: {}", desiredP.size()); + + Protocol choosen = null; + Authority authority = null; + int index = 0; + boolean turlBuilt = false; + while (!turlBuilt && index < desiredP.size()) { + choosen = desiredP.get(index); + authority = null; + log.debug("Selected Protocol: {}", choosen); + if (capability.isPooledProtocol(choosen)) { + log.debug("The protocol selected is in POOL Configuration"); + try { + authority = getPooledAuthority(choosen); + } catch (BalancingStrategyException e) { + log.warn( + "Unable to get the pool member to be used to build the turl. BalancerException : {}", + e.getMessage()); + index++; + continue; + } + } else { + log.debug("The protocol selected is in NON-POOL Configuration"); + TransportProtocol transProt = null; + List protList = capability.getManagedProtocolByScheme(choosen); + if (protList.size() > 1) { // Strange case + log.warn("More than one protocol {}" + + " defined but NOT in POOL Configuration. 
Taking the first one.", choosen); + } + transProt = protList.get(0); + authority = transProt.getAuthority(); + } + + if (choosen.equals(Protocol.HTTP) || choosen.equals(Protocol.HTTPS)) { + resultTURL = buildHTTPTURL(choosen, authority); + } else { + resultTURL = buildTURL(choosen, authority); + } + + turlBuilt = true; + } + if (!turlBuilt) { + throw new TURLBuildingException( + "Unable to build the turl given protocols " + desiredP.toString()); + } + } + } + return resultTURL; + } + + public VirtualFS getVirtualFileSystem() { + return this.vfs; + } + + public boolean hasJustInTimeACLs() { + + boolean result = true; + + if (aclMode.equals(Capability.ACLMode.UNDEF)) { + this.aclMode = vfs.getCapabilities().getACLMode(); + } + if (aclMode.equals(Capability.ACLMode.JUST_IN_TIME)) { + result = true; + } else { + result = false; + } + + return result; + } + + + public void setMappingRule(MappingRule winnerRule) { + this.winnerRule = winnerRule; + } + + public void setSpace(Space space) { + this.space = space; + } + + public void setStFNRoot(String stfnRoot) { + + this.stfnRoot = stfnRoot; + } + + public void setStoRIType(StoRIType type) { + + this.type = type; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + + sb.append("\n"); + sb.append(" stori.stfn : " + this.getStFN().toString() + "\n"); + sb.append(" stori.vfs-root :" + this.vfsRoot + "\n"); + sb.append(" stori.absolutePath : " + this.getAbsolutePath() + "\n"); + sb.append(" stori.vfs NAME : " + this.getVFSName() + "\n"); + sb.append(" stori.stfn FileName : " + this.fileName + "\n"); + sb.append(" stori.stfn StFN path : " + this.stfnPath + "\n"); + sb.append(" stori.stfn rel. 
Path : " + this.relativePath + "\n"); + sb.append(" stori.relative StFN : " + this.relativeStFN + "\n"); + sb.append(" stori.stfn-root : " + this.stfnRoot + "\n"); + sb.append(" story.type : " + this.type + "\n"); + sb.append(" stori.SURL : " + this.getSURL() + "\n"); + sb.append(" stori.localFile : " + this.getLocalFile() + "\n"); + sb.append(" stori.mappingRule : " + this.getMappingRule() + "\n"); + + return sb.toString(); + } + + private String buildSURLString() throws NamespaceException, UnknownHostException { + String stfn = stfnRoot + NamingConst.SEPARATOR + relativeStFN; + SURL surl = new SURL(config.getSrmServiceHostname(), config.getSrmServicePort(), stfn); + return surl.toString(); + } + + private TTURL buildHTTPTURL(Protocol p, Authority authority) { + + String prefix = Configuration.getInstance().getHTTPTURLPrefix(); + StringBuilder sb = new StringBuilder(); + sb.append(p.getProtocolPrefix()); + sb.append(authority); + + if (prefix != null) { + sb.append(prefix); + } + + sb.append(getStFN().toString()); + + log.debug("built http turl: {}", sb.toString()); + + return TTURL.makeFromString(sb.toString()); + + } + + private TTURL buildTURL(Protocol protocol, Authority authority) + throws InvalidProtocolForTURLException { + + TTURL result = null; + + switch (protocol.getProtocolIndex()) { + case 0: // EMPTY Protocol + throw new InvalidProtocolForTURLException(protocol.getSchema()); + case 1: + result = TURLBuilder.buildFileTURL(authority, this.getPFN()); + break; // FILE Protocol + case 2: + result = TURLBuilder.buildGsiftpTURL(authority, this.getPFN()); + break; // GSIFTP Protocol + case 3: + result = TURLBuilder.buildRFIOTURL(authority, this.getPFN()); + break; // RFIO Protocol + case 4: // SRM Protocol + throw new InvalidProtocolForTURLException(protocol.getSchema()); + case 5: + result = TURLBuilder.buildROOTTURL(authority, this.getPFN()); + break; // ROOT Protocol + case 8: + result = TURLBuilder.buildXROOTTURL(authority, this.getPFN()); + break; // 
XROOT Protocol + default: + // Unknown protocol + throw new InvalidProtocolForTURLException(protocol.getSchema()); + } + return result; + } + + /** + * @param pooledProtocol + * @return + * @throws BalancerException + */ + private Authority getPooledAuthority(Protocol pooledProtocol) throws BalancingStrategyException { + + Authority authority = null; + if (pooledProtocol.equals(Protocol.GSIFTP) || pooledProtocol.equals(Protocol.HTTP) + || pooledProtocol.equals(Protocol.HTTPS)) { + BalancingStrategy bal = vfs.getProtocolBalancingStrategy(pooledProtocol); + if (bal != null) { + Node node = bal.getNextElement(); + authority = new Authority(node.getHostName(), node.getPort()); + } + } else { + log.error("Unable to manage pool with protocol different from GSIFTP."); + } + return authority; + } + + private String getVFSName() { + + String result = "UNDEF"; + if (vfs != null) { + result = vfs.getAliasName(); + } + return result; + } + + /** + * Set "lifetime" and "startTime" information. The corresponding values are retrieved from the DB. + */ + private void setVolatileInformation() { + + VolatileAndJiTCatalog catalog = VolatileAndJiTCatalog.getInstance(); + List volatileInfo = catalog.volatileInfoOn(getPFN()); + if (volatileInfo.size() != 2) { + lifetime = TLifeTimeInSeconds.makeInfinite(); + startTime = null; + return; + } + startTime = new Date(((Calendar) volatileInfo.get(0)).getTimeInMillis()); + lifetime = (TLifeTimeInSeconds) volatileInfo.get(1); + volatileInformationAreSet = true; + } + + public StFN getStFNFromMappingRule() { + try { + + if (getMappingRule() == null) { + log.warn("Mapping rule is null for this StorI. " + "Falling back to VFS StFN."); + return getStFN(); + } + + String mappingRuleRoot = getMappingRule().getStFNRoot(); + String mappedStfn = mappingRuleRoot + NamingConst.SEPARATOR + relativeStFN; + + return StFN.make(mappedStfn); + + } catch (InvalidStFNAttributeException e) { + + log.error("Error building StFN from mapping rule. 
Reason: {}", e.getMessage(), e); + + log.error("Falling back to VFS StFN."); + + return getStFN(); + + } + } } diff --git a/src/main/java/it/grid/storm/namespace/StoRIImpl.java b/src/main/java/it/grid/storm/namespace/StoRIImpl.java deleted file mode 100644 index c00788c9b..000000000 --- a/src/main/java/it/grid/storm/namespace/StoRIImpl.java +++ /dev/null @@ -1,715 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.namespace; - -import it.grid.storm.balancer.BalancingStrategy; -import it.grid.storm.balancer.BalancingStrategyException; -import it.grid.storm.balancer.Node; -import it.grid.storm.catalogs.VolatileAndJiTCatalog; -import it.grid.storm.common.types.InvalidPFNAttributeException; -import it.grid.storm.common.types.InvalidStFNAttributeException; -import it.grid.storm.common.types.PFN; -import it.grid.storm.common.types.StFN; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.config.Configuration; -import it.grid.storm.filesystem.FilesystemIF; -import it.grid.storm.filesystem.LocalFile; -import it.grid.storm.filesystem.ReservationException; -import it.grid.storm.filesystem.Space; -import it.grid.storm.filesystem.SpaceSystem; -import it.grid.storm.namespace.model.Authority; -import it.grid.storm.namespace.model.Capability; -import it.grid.storm.namespace.model.MappingRule; -import it.grid.storm.namespace.model.PathCreator; -import it.grid.storm.namespace.model.Protocol; -import it.grid.storm.namespace.model.StoRIType; -import it.grid.storm.namespace.model.TransportProtocol; -import it.grid.storm.namespace.naming.NamespaceUtil; -import it.grid.storm.namespace.naming.NamingConst; -import it.grid.storm.namespace.naming.SURL; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TTURL; - -import static org.apache.commons.lang.StringUtils.join; - -import java.io.File; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Collection; -import java.util.Date; -import java.util.List; - -import org.slf4j.Logger; - -public class StoRIImpl implements StoRI { - - private Logger log = NamespaceDirector.getLogger(); - - private TSURL surl; - private PFN pfn; - private Capability.ACLMode aclMode = 
Capability.ACLMode.UNDEF; - private TLifeTimeInSeconds lifetime = null; - private Date startTime = null; - private LocalFile localFile = null; - private Space space; - - private VirtualFSInterface vfs; - private FilesystemIF fs; - private SpaceSystem spaceDriver; - private StoRIType type; - private Capability capability; - - // Elements of Name of StoRI - private String stfn; - private String vfsRoot; - private String relativeStFN; - private String relativePath; - private String fileName; - private String stfnPath; - private String stfnRoot; - - private MappingRule winnerRule; - - // Boolean status for full detailed metadata - private boolean volatileInformationAreSet = false; - - public StoRIImpl(VirtualFSInterface vfs, MappingRule winnerRule, - String relativeStFN, StoRIType type) { - - if (vfs != null) { - this.vfs = vfs; - capability = (Capability) vfs.getCapabilities(); - } else { - log.error("StoRI built without VFS!"); - } - - if (winnerRule != null) { - stfnRoot = winnerRule.getStFNRoot(); - stfn = stfnRoot + NamingConst.SEPARATOR + relativeStFN; - - vfsRoot = vfs.getRootPath(); - - this.relativeStFN = relativeStFN; - - stfnPath = NamespaceUtil.getStFNPath(stfn); - - relativePath = NamespaceUtil.consumeFileName(relativeStFN); - - if (relativePath != null) { - if (relativePath.startsWith(NamingConst.SEPARATOR)) { - relativePath = relativePath.substring(1); - } - } else { - relativePath = "/"; - } - - fileName = NamespaceUtil.getFileName(relativeStFN); - log.debug("StFN Filename : {} [StFN = '{}']", fileName, - relativeStFN); - - if (type == null) { - if (relativeStFN.endsWith(NamingConst.SEPARATOR)) { - type = StoRIType.FOLDER; - } else { - type = StoRIType.UNKNOWN; - } - } else { - this.type = type; - } - - } else { - log.warn("StoRI built without mapping rule"); - } - } - - public StoRIImpl(VirtualFSInterface vfs, String stfnStr, - TLifeTimeInSeconds lifetime, StoRIType type) { - - this.vfs = vfs; - this.capability = (Capability) vfs.getCapabilities(); - 
// Relative path has to be a path in a relative form! (without "/" at - // begins) - if (relativePath != null) { - if (relativePath.startsWith(NamingConst.SEPARATOR)) { - this.relativePath = relativePath.substring(1); - } - } else { - this.relativePath = "/"; - } - - this.lifetime = lifetime; - - if (type == null) { - this.type = StoRIType.UNKNOWN; - } else { - this.type = type; - } - - this.stfnRoot = null; - - this.fileName = NamespaceUtil.getFileName(stfnStr); - log.debug("StFN Filename : {} [StFN = '{}']", fileName, - stfnStr); - - this.stfnPath = NamespaceUtil.getStFNPath(stfnStr); - log.debug("StFN StFNPath : {} [StFN = '{}']", stfnPath, stfnStr); - - } - - public void allotSpaceByToken(TSpaceToken token) throws ReservationException, - ExpiredSpaceTokenException { - - // Retrieve SpaceSystem Driver - if (spaceDriver == null) { - try { - this.spaceDriver = vfs.getSpaceSystemDriverInstance(); - } catch (NamespaceException e) { - log.error(e.getMessage(), e); - throw new ReservationException( - "Error while retrieving Space System Driver for VFS", e); - } - } - - try { - vfs.useAllSpaceForFile(token, this); - } catch (NamespaceException e) { - log.error("Error using space token {} for file {}: {}", - token, fileName, e.getMessage(),e); - throw new ReservationException(e.getMessage(), e); - } - - } - - public void allotSpaceByToken(TSpaceToken token, TSizeInBytes totSize) - throws ReservationException, ExpiredSpaceTokenException { - - if (spaceDriver == null) { - try { - this.spaceDriver = vfs.getSpaceSystemDriverInstance(); - } catch (NamespaceException e) { - log.error(e.getMessage(),e); - throw new ReservationException( - "Error while retrieving Space System Driver for VFS", e); - } - } - - try { - vfs.useSpaceForFile(token, this, totSize); - } catch (NamespaceException e) { - log.error("Error using space token {} for file {}: {}", - token, fileName, e.getMessage(),e); - throw new ReservationException(e.getMessage(), e); - } - - } - - public void 
allotSpaceForFile(TSizeInBytes totSize) - throws ReservationException { - - if (spaceDriver == null) { - try { - this.spaceDriver = vfs.getSpaceSystemDriverInstance(); - } catch (NamespaceException e) { - log.error("Error while retrieving Space System Driver for VFS {}", - e.getMessage(), e); - - throw new ReservationException( - "Error while retrieving Space System Driver for VFS", e); - } - } - - try { - vfs.makeSilhouetteForFile(this, totSize); - } catch (NamespaceException e) { - log.error(e.getMessage(),e); - throw new ReservationException( - "Error while constructing 'Space Silhouette' for " + this.fileName, e); - } - - log.debug("Space built. Space " + this.getSpace().getSpaceFile().getPath()); - this.getSpace().allot(); - } - - public String getAbsolutePath() { - return vfs.getRootPath() + NamingConst.SEPARATOR + relativeStFN; - } - - public TLifeTimeInSeconds getFileLifeTime() { - if (!(volatileInformationAreSet)) { - setVolatileInformation(); - } - return lifetime; - } - - public String getFilename() { - - return this.fileName; - } - - public Date getFileStartTime() { - - if (!(volatileInformationAreSet)) { - setVolatileInformation(); - } - return startTime; - } - - public ArrayList getChildren(TDirOption dirOption) - throws InvalidDescendantsEmptyRequestException, - InvalidDescendantsPathRequestException, - InvalidDescendantsFileRequestException { - - ArrayList stoRIList = new ArrayList(); - File fileHandle = new File(getAbsolutePath()); - - if (!fileHandle.isDirectory()) { - if (fileHandle.isFile()) { - log.error("SURL represents a File, not a Directory!"); - throw new InvalidDescendantsFileRequestException(fileHandle); - } else { - log.warn("SURL does not exists!"); - throw new InvalidDescendantsPathRequestException(fileHandle); - } - } else { // SURL point to an existent directory. 
- // Create ArrayList containing all Valid fileName path found in - // PFN of StoRI's SURL - PathCreator pCreator = new PathCreator(fileHandle, - dirOption.isAllLevelRecursive(), 1); - Collection pathList = pCreator.generateChildren(); - if (pathList.size() == 0) { - log.debug("SURL point to an EMPTY DIRECTORY"); - throw new InvalidDescendantsEmptyRequestException(fileHandle, pathList); - } else { // Creation of StoRI LIST - NamespaceInterface namespace = NamespaceDirector.getNamespace(); - for (String childPath : pathList) { - log.debug(":Creation of new StoRI with path: {}", - childPath); - try { - - StoRI childStorI = namespace.resolveStoRIbyAbsolutePath(childPath, vfs); - childStorI.setMappingRule(getMappingRule()); - - stoRIList.add(childStorI); - } catch (NamespaceException ex) { - log.error("Error occurred while resolving StoRI by absolute path", - ex); - } - } - } - } - return stoRIList; - } - - public LocalFile getLocalFile() { - - if (localFile == null) { - try { - fs = vfs.getFilesystem(); - } catch (NamespaceException ex) { - log.error("Error while retrieving FS driver ", ex); - } - localFile = new LocalFile(getAbsolutePath(), fs); - } - return localFile; - } - - public MappingRule getMappingRule() { - return this.winnerRule; - } - - public List getParents() { - - StoRI createdStoRI = null; - ArrayList parentList = new ArrayList(); - String consumeElements = this.relativePath; - String consumed; - boolean lastElements = false; - - do { - createdStoRI = new StoRIImpl(this.vfs, this.winnerRule, consumeElements, - StoRIType.FOLDER); - parentList.add(createdStoRI); - consumed = NamespaceUtil.consumeElement(consumeElements); - if (consumed.equals(consumeElements)) { - lastElements = true; - } else { - consumeElements = consumed; - } - } while ((!lastElements)); - - return parentList; - } - - public PFN getPFN() { - - if (pfn == null) { - try { - this.pfn = PFN.make(getAbsolutePath()); - } catch (InvalidPFNAttributeException e) { - 
log.error(e.getMessage(),e); - } - } - return this.pfn; - } - - public String getRelativePath() { - - return this.relativePath; - } - - public String getRelativeStFN() { - - return this.relativeStFN; - } - - public Space getSpace() { - - if (space == null) { - log.error("No space bound with this StoRI!"); - return null; - } - return this.space; - } - - public StFN getStFN() { - - StFN stfn = null; - if (this.surl == null) { - getSURL(); - } - stfn = surl.sfn().stfn(); - return stfn; - } - - public String getStFNPath() { - - return this.stfnPath; - } - - public String getStFNRoot() { - - return this.stfnRoot; - } - - public StoRIType getStoRIType() { - - return this.type; - } - - public TSURL getSURL() { - - /** - * The String passed to TSURL.makeFromString MUST contains a valid TSURL in - * string format, not only relativePath. - */ - if (this.surl == null) { - try { - this.surl = TSURL.makeFromStringValidate(buildSURLString()); - } catch (Throwable e) { - log.error("Unable to build the SURL with relative path: {}. 
{}", - relativePath, e.getMessage(), e); - } - } - return surl; - } - - public TTURL getTURL(TURLPrefix desiredProtocols) - throws IllegalArgumentException, InvalidGetTURLProtocolException, - TURLBuildingException { - - TTURL resultTURL = null; - - if (desiredProtocols == null || desiredProtocols.size() == 0) { - log - .error(" request with NULL or empty prefixOfAcceptedTransferProtocol!"); - throw new IllegalArgumentException( - "unable to build the TTURL, invalid arguments: desiredProtocols=" - + desiredProtocols); - } else { - - // Within the request there are some protocol preferences - // Calculate the intersection between Desired Protocols and Available - // Protocols - List desiredP = new ArrayList<>(desiredProtocols.getDesiredProtocols()); - List availableP = new ArrayList<>(capability.getAllManagedProtocols()); - desiredP.retainAll(availableP); - - if (desiredP.isEmpty()) { - String msg = String.format("None of [%s] protocols matches the available " - + "protocols [%s]", join(desiredP, ','), join(availableP, ',')); - log.error(msg); - throw new InvalidGetTURLProtocolException(msg); - - } else { - - log.debug("Protocol matching.. Intersection size: {}", - desiredP.size()); - - Protocol choosen = null; - Authority authority = null; - int index = 0; - boolean turlBuilt = false; - while (!turlBuilt && index < desiredP.size()) { - choosen = desiredP.get(index); - authority = null; - log.debug("Selected Protocol: {}", choosen); - if (capability.isPooledProtocol(choosen)) { - log.debug("The protocol selected is in POOL Configuration"); - try { - authority = getPooledAuthority(choosen); - } catch (BalancingStrategyException e) { - log - .warn("Unable to get the pool member to be used to build the turl. 
BalancerException : {}", - e.getMessage()); - index++; - continue; - } - } else { - log.debug("The protocol selected is in NON-POOL Configuration"); - TransportProtocol transProt = null; - List protList = capability - .getManagedProtocolByScheme(choosen); - if (protList.size() > 1) { // Strange case - log - .warn("More than one protocol {}" - + " defined but NOT in POOL Configuration. Taking the first one.", - choosen); - } - transProt = protList.get(0); - authority = transProt.getAuthority(); - } - - if (choosen.equals(Protocol.HTTP) || choosen.equals(Protocol.HTTPS)){ - resultTURL = buildHTTPTURL(choosen,authority); - } else { - resultTURL = buildTURL(choosen, authority); - } - - turlBuilt = true; - } - if (!turlBuilt) { - throw new TURLBuildingException( - "Unable to build the turl given protocols " + desiredP.toString()); - } - } - } - return resultTURL; - } - - public VirtualFSInterface getVirtualFileSystem() { - return this.vfs; - } - - public boolean hasJustInTimeACLs() { - - boolean result = true; - - if (aclMode.equals(Capability.ACLMode.UNDEF)) { - this.aclMode = vfs.getCapabilities().getACLMode(); - } - if (aclMode.equals(Capability.ACLMode.JUST_IN_TIME)) { - result = true; - } else { - result = false; - } - - return result; - } - - - public void setMappingRule(MappingRule winnerRule) { - this.winnerRule = winnerRule; - } - - public void setSpace(Space space) { - this.space = space; - } - - public void setStFNRoot(String stfnRoot) { - - this.stfnRoot = stfnRoot; - } - - public void setStoRIType(StoRIType type) { - - this.type = type; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - - sb.append("\n"); - sb.append(" stori.stfn : " + this.getStFN().toString() + "\n"); - sb.append(" stori.vfs-root :" + this.vfsRoot + "\n"); - sb.append(" stori.absolutePath : " + this.getAbsolutePath() + "\n"); - sb.append(" stori.vfs NAME : " + this.getVFSName() + "\n"); - sb.append(" stori.stfn FileName : " + this.fileName + 
"\n"); - sb.append(" stori.stfn StFN path : " + this.stfnPath + "\n"); - sb.append(" stori.stfn rel. Path : " + this.relativePath + "\n"); - sb.append(" stori.relative StFN : " + this.relativeStFN + "\n"); - sb.append(" stori.stfn-root : " + this.stfnRoot + "\n"); - sb.append(" story.type : " + this.type + "\n"); - sb.append(" stori.SURL : " + this.getSURL() + "\n"); - sb.append(" stori.localFile : " + this.getLocalFile() + "\n"); - sb.append(" stori.mappingRule : " + this.getMappingRule() + "\n"); - - return sb.toString(); - } - - private String buildSURLString() throws NamespaceException { - String stfn = stfnRoot + NamingConst.SEPARATOR + relativeStFN; - SURL surl = new SURL(stfn); - return surl.toString(); - } - - private TTURL buildHTTPTURL(Protocol p, Authority authority){ - - String prefix = Configuration.getInstance().getHTTPTURLPrefix(); - StringBuilder sb = new StringBuilder(); - sb.append(p.getProtocolPrefix()); - sb.append(authority); - - if ( prefix != null){ - sb.append(prefix); - } - - sb.append(getStFN().toString()); - - log.debug("built http turl: {}", sb.toString()); - - return TTURL.makeFromString(sb.toString()); - - } - private TTURL buildTURL(Protocol protocol, Authority authority) - throws InvalidProtocolForTURLException { - - TTURL result = null; - - switch (protocol.getProtocolIndex()) { - case 0: // EMPTY Protocol - throw new InvalidProtocolForTURLException(protocol.getSchema()); - case 1: - result = TURLBuilder.buildFileTURL(authority, this.getPFN()); - break; // FILE Protocol - case 2: - result = TURLBuilder.buildGsiftpTURL(authority, this.getPFN()); - break; // GSIFTP Protocol - case 3: - result = TURLBuilder.buildRFIOTURL(authority, this.getPFN()); - break; // RFIO Protocol - case 4: // SRM Protocol - throw new InvalidProtocolForTURLException(protocol.getSchema()); - case 5: - result = TURLBuilder.buildROOTTURL(authority, this.getPFN()); - break; // ROOT Protocol - case 8: - result = TURLBuilder.buildXROOTTURL(authority, this.getPFN()); 
- break; // XROOT Protocol - default: - // Unknown protocol - throw new InvalidProtocolForTURLException(protocol.getSchema()); - } - return result; - } - - /** - * @param pooledProtocol - * @return - * @throws BalancerException - */ - private Authority getPooledAuthority(Protocol pooledProtocol) - throws BalancingStrategyException { - - Authority authority = null; - if (pooledProtocol.equals(Protocol.GSIFTP) - || pooledProtocol.equals(Protocol.HTTP) - || pooledProtocol.equals(Protocol.HTTPS)) { - BalancingStrategy bal = vfs - .getProtocolBalancingStrategy(pooledProtocol); - if (bal != null) { - Node node = bal.getNextElement(); - authority = new Authority(node.getHostName(), node.getPort()); - } - } else { - log.error("Unable to manage pool with protocol different from GSIFTP."); - } - return authority; - } - - private String getVFSName() { - - String result = "UNDEF"; - if (vfs != null) { - result = vfs.getAliasName(); - } - return result; - } - - /** - * Set "lifetime" and "startTime" information. The corresponding values are - * retrieved from the DB. - */ - private void setVolatileInformation() { - - VolatileAndJiTCatalog catalog = VolatileAndJiTCatalog.getInstance(); - List volatileInfo = catalog.volatileInfoOn(getPFN()); - if (volatileInfo.size() != 2) { - lifetime = TLifeTimeInSeconds.makeInfinite(); - startTime = null; - return; - } - startTime = new Date(((Calendar) volatileInfo.get(0)).getTimeInMillis()); - lifetime = (TLifeTimeInSeconds) volatileInfo.get(1); - volatileInformationAreSet = true; - } - - @Override - public StFN getStFNFromMappingRule() { - try { - - if (getMappingRule() == null){ - log.warn("Mapping rule is null for this StorI. 
" + - "Falling back to VFS StFN."); - return getStFN(); - } - - String mappingRuleRoot = getMappingRule().getStFNRoot(); - String mappedStfn = mappingRuleRoot + NamingConst.SEPARATOR - + relativeStFN; - - return StFN.make(mappedStfn); - - } catch (InvalidStFNAttributeException e) { - - log.error("Error building StFN from mapping rule. Reason: {}", - e.getMessage(),e); - - log.error("Falling back to VFS StFN."); - - return getStFN(); - - } - } - -} diff --git a/src/main/java/it/grid/storm/namespace/TURLBuilder.java b/src/main/java/it/grid/storm/namespace/TURLBuilder.java index 6008d7f62..226a0a578 100644 --- a/src/main/java/it/grid/storm/namespace/TURLBuilder.java +++ b/src/main/java/it/grid/storm/namespace/TURLBuilder.java @@ -17,6 +17,9 @@ package it.grid.storm.namespace; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.common.types.PFN; import it.grid.storm.config.Configuration; import it.grid.storm.namespace.model.Authority; @@ -24,11 +27,9 @@ import it.grid.storm.srm.types.InvalidTTURLAttributesException; import it.grid.storm.srm.types.TTURL; -import org.slf4j.Logger; - public class TURLBuilder { - private static Logger log = NamespaceDirector.getLogger(); + private static Logger log = LoggerFactory.getLogger(TURLBuilder.class); public TURLBuilder() { @@ -77,7 +78,7 @@ public static TTURL buildRFIOTURL(Authority authority, PFN physicalFN) { public static TTURL buildROOTTURL(Authority authority, PFN physicalFN) { String extraSlashesForROOT = Configuration.getInstance() - .getExtraSlashesForROOTTURL(); + .getExtraSlashesForRootTURL(); return buildTURL(Protocol.ROOT, authority, extraSlashesForROOT, physicalFN); } diff --git a/src/main/java/it/grid/storm/namespace/VirtualFSInterface.java b/src/main/java/it/grid/storm/namespace/VirtualFSInterface.java deleted file mode 100644 index 5be38eef2..000000000 --- a/src/main/java/it/grid/storm/namespace/VirtualFSInterface.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * - * Copyright (c) 
Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.namespace; - -import it.grid.storm.balancer.BalancingStrategy; -import it.grid.storm.balancer.Node; -import it.grid.storm.filesystem.FilesystemIF; -import it.grid.storm.filesystem.SpaceSystem; -import it.grid.storm.filesystem.swig.genericfs; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.namespace.model.ApproachableRule; -import it.grid.storm.namespace.model.MappingRule; -import it.grid.storm.namespace.model.Protocol; -import it.grid.storm.namespace.model.SAAuthzType; -import it.grid.storm.namespace.model.StoRIType; -import it.grid.storm.namespace.model.StorageClassType; -import it.grid.storm.space.StorageSpaceData; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; - -import java.util.List; - -public interface VirtualFSInterface { - - public String getFSType(); - - public String getSpaceTokenDescription(); - - public StorageClassType getStorageClassType(); - - public String getRootPath(); - - public StoRI getRoot() throws NamespaceException; - - public String getAliasName(); - - public Class getFSDriver() throws NamespaceException; - - public genericfs getFSDriverInstance() throws NamespaceException; - - public FilesystemIF getFilesystem() throws NamespaceException; - - public Class getSpaceSystemDriver() throws NamespaceException; - - public SpaceSystem 
getSpaceSystemDriverInstance() throws NamespaceException; - - public boolean isApproachableByUser(GridUserInterface user); - - public DefaultValuesInterface getDefaultValues(); - - public CapabilityInterface getCapabilities(); - - public PropertyInterface getProperties(); - - public List getMappingRules() throws NamespaceException; - - public List getApproachableRules() - throws NamespaceException; - - public TSizeInBytes getUsedNearlineSpace() throws NamespaceException; - - public TSizeInBytes getUsedOnlineSpace() throws NamespaceException; - - public TSizeInBytes getAvailableOnlineSpace() throws NamespaceException; - - public TSizeInBytes getAvailableNearlineSpace() throws NamespaceException; - - public StoRI createFile(String relativePath) throws NamespaceException; - - public StoRI createFile(String relativePath, StoRIType type) throws NamespaceException; - - public StoRI createFile(String relativePath, StoRIType type, MappingRule rule) throws NamespaceException; - - public void makeSilhouetteForFile(StoRI stori, TSizeInBytes presumedSize) - throws NamespaceException; - - public void useSpaceForFile(TSpaceToken token, StoRI file, - TSizeInBytes sizePresumed) throws ExpiredSpaceTokenException, - NamespaceException; - - public void useAllSpaceForFile(TSpaceToken token, StoRI file) - throws ExpiredSpaceTokenException, NamespaceException; - - - public StoRI createSpace(String relativePath, long guaranteedSize, - long totalSize) throws NamespaceException; - - public StoRI createSpace(String relativePath, long totalSize) - throws NamespaceException; - - public StoRI createSpace(long guarSize, long totalSize) - throws NamespaceException; - - public StoRI createSpace(long totalSize) throws NamespaceException; - - public StoRI createSpace() throws NamespaceException; - - public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) - throws NamespaceException; - - public StorageSpaceData getSpaceByAlias(String alias) - throws NamespaceException; - - 
public void storeSpaceByToken(StorageSpaceData spaceData) - throws NamespaceException; - - public StoRI createDefaultStoRI() throws NamespaceException; - - public long getCreationTime(); - - public TSpaceToken getSpaceToken(); - - public SAAuthzType getStorageAreaAuthzType() throws NamespaceException; - - public String getStorageAreaAuthzDB() throws NamespaceException; - - public String getStorageAreaAuthzFixed() throws NamespaceException; - - public boolean isPoolDefined(Protocol protocol) throws NamespaceException; - - public BalancingStrategy getProtocolBalancingStrategy( - Protocol protocol); - - public boolean isApproachableByAnonymous(); - - public boolean isHttpWorldReadable(); - - public void setProperties(PropertyInterface prop); - - public boolean increaseUsedSpace(long size); - - public boolean decreaseUsedSpace(long size); - -} diff --git a/src/main/java/it/grid/storm/namespace/config/InvalidConfigurationFileFormatException.java b/src/main/java/it/grid/storm/namespace/config/InvalidConfigurationFileFormatException.java index da72b06b1..432df5dbb 100644 --- a/src/main/java/it/grid/storm/namespace/config/InvalidConfigurationFileFormatException.java +++ b/src/main/java/it/grid/storm/namespace/config/InvalidConfigurationFileFormatException.java @@ -20,8 +20,7 @@ import it.grid.storm.namespace.*; /** - * This class represents an Exception throws if TDirOptionData is not well - * formed. * + * This class represents an Exception throws if TDirOptionData is not well formed. 
* * * @author Magnoni Luca * @author Cnaf - INFN Bologna @@ -31,15 +30,20 @@ public class InvalidConfigurationFileFormatException extends NamespaceException { - private boolean notSupported = false; + /** + * + */ + private static final long serialVersionUID = 1L; - public InvalidConfigurationFileFormatException(String fileName) { + private boolean notSupported = false; - notSupported = fileName.endsWith(".cfg") || fileName.endsWith(".xml"); - } + public InvalidConfigurationFileFormatException(String fileName) { - public String toString() { + notSupported = fileName.endsWith(".cfg") || fileName.endsWith(".xml"); + } - return ("Configuration File Format NOT SUPPORTED = Not .xml or .cfg formati = " + notSupported); - } + public String toString() { + + return ("Configuration File Format NOT SUPPORTED = Not .xml or .cfg formati = " + notSupported); + } } diff --git a/src/main/java/it/grid/storm/namespace/config/NamespaceCheck.java b/src/main/java/it/grid/storm/namespace/config/NamespaceCheck.java index 5b0edbe09..d75bf77f9 100644 --- a/src/main/java/it/grid/storm/namespace/config/NamespaceCheck.java +++ b/src/main/java/it/grid/storm/namespace/config/NamespaceCheck.java @@ -17,15 +17,6 @@ package it.grid.storm.namespace.config; -import it.grid.storm.namespace.CapabilityInterface; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.VirtualFSInterface; -import it.grid.storm.namespace.model.ACLEntry; -import it.grid.storm.namespace.model.ApproachableRule; -import it.grid.storm.namespace.model.DefaultACL; -import it.grid.storm.namespace.model.MappingRule; -import it.grid.storm.namespace.util.userinfo.LocalGroups; - import java.io.File; import java.util.ArrayList; import java.util.Iterator; @@ -33,35 +24,26 @@ import java.util.Map; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + +import it.grid.storm.namespace.CapabilityInterface; +import it.grid.storm.namespace.model.ACLEntry; +import 
it.grid.storm.namespace.model.ApproachableRule; +import it.grid.storm.namespace.model.DefaultACL; +import it.grid.storm.namespace.model.MappingRule; +import it.grid.storm.namespace.model.VirtualFS; +import it.grid.storm.namespace.util.userinfo.LocalGroups; -/** - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ public class NamespaceCheck { - private final Logger log = NamespaceDirector.getLogger(); - private final Map vfss; + private final Logger log = LoggerFactory.getLogger(NamespaceCheck.class); + private final Map vfss; private final Map maprules; private final Map apprules; - public NamespaceCheck(Map vfss, + public NamespaceCheck(Map vfss, Map maprules, Map apprules) { @@ -82,15 +64,15 @@ public boolean check() { private boolean checkGroups(boolean vfsCheckResult) { log - .info("Namespace check. Checking of the existence of the needed Local group ..."); + .debug("Namespace check. Checking of the existence of the needed Local group ..."); boolean result = true; if (!vfsCheckResult) { log .warn("Skip the check of the needed Local Group, because check of VFSs failed."); } else { - List vf = new ArrayList<>(vfss.values()); - for (VirtualFSInterface vfs : vf) { + List vf = new ArrayList<>(vfss.values()); + for (VirtualFS vfs : vf) { // Check the presence of Default ACL CapabilityInterface cap = vfs.getCapabilities(); @@ -112,7 +94,7 @@ private boolean checkGroups(boolean vfsCheckResult) { } } if (result) { - log.info("All local groups are defined. "); + log.debug("All local groups are defined. 
"); } else { log.warn("Please check the local group needed to StoRM"); } @@ -129,17 +111,17 @@ private boolean checkGroups(boolean vfsCheckResult) { */ private boolean checkVFS() { - log.info("Namespace checking VFSs .."); + log.debug("Namespace checking VFSs .."); boolean result = true; if (vfss == null) { log.error("Anyone VFS is defined in namespace!"); return false; } else { - List rules = new ArrayList<>(vfss.values()); - Iterator scan = rules.iterator(); + List rules = new ArrayList<>(vfss.values()); + Iterator scan = rules.iterator(); while (scan.hasNext()) { - VirtualFSInterface vfs = scan.next(); + VirtualFS vfs = scan.next(); String aliasName = vfs.getAliasName(); log.debug("VFS named '{}' found.", aliasName); @@ -153,7 +135,7 @@ private boolean checkVFS() { } } if (result) { - log.info(" VFSs are well-defined."); + log.debug(" VFSs are well-defined."); } return result; } @@ -198,8 +180,8 @@ private boolean checkAppRules() { boolean check = false; while (scan.hasNext()) { ApproachableRule rule = scan.next(); - List approachVFSs = new ArrayList<>(rule.getApproachableVFS()); - for (VirtualFSInterface aVfs : approachVFSs) { + List approachVFSs = Lists.newArrayList(rule.getApproachableVFS()); + for (VirtualFS aVfs : approachVFSs) { check = vfss.containsKey(aVfs.getAliasName()); if (!check) { log.error("ERROR in NAMESPACE - APP RULE '{}' point a UNKNOWN VFS '{}'!", rule.getRuleName(), aVfs); diff --git a/src/main/java/it/grid/storm/namespace/config/NamespaceParser.java b/src/main/java/it/grid/storm/namespace/config/NamespaceParser.java index a5eedcef9..456cb8fde 100644 --- a/src/main/java/it/grid/storm/namespace/config/NamespaceParser.java +++ b/src/main/java/it/grid/storm/namespace/config/NamespaceParser.java @@ -17,13 +17,13 @@ package it.grid.storm.namespace.config; -import it.grid.storm.namespace.VirtualFSInterface; -import it.grid.storm.namespace.model.ApproachableRule; -import it.grid.storm.namespace.model.MappingRule; - import java.util.List; import 
java.util.Map; +import it.grid.storm.namespace.model.ApproachableRule; +import it.grid.storm.namespace.model.MappingRule; +import it.grid.storm.namespace.model.VirtualFS; + /** *

* Title: @@ -49,13 +49,13 @@ public interface NamespaceParser { public String getNamespaceVersion(); - public Map getVFSs(); + public Map getVFSs(); - public VirtualFSInterface getVFS(String vfsName); + public VirtualFS getVFS(String vfsName); public List getAllVFS_Roots(); - public Map getMapVFS_Root(); + public Map getMapVFS_Root(); public List getAllMappingRule_StFNRoots(); @@ -65,6 +65,4 @@ public interface NamespaceParser { public Map getApproachableRules(); - public long getLastUpdateTime(); - } diff --git a/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceLoader.java b/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceLoader.java index 58d8429ab..bf3224a1f 100644 --- a/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceLoader.java +++ b/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceLoader.java @@ -17,17 +17,9 @@ package it.grid.storm.namespace.config.xml; -import it.grid.storm.namespace.NamespaceValidator; -import it.grid.storm.namespace.config.NamespaceLoader; - -import static java.io.File.separatorChar; - import java.io.File; +import java.io.FileNotFoundException; import java.io.IOException; -import java.util.Observable; -import java.util.Observer; -import java.util.Timer; -import java.util.TimerTask; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; @@ -38,325 +30,102 @@ import org.apache.commons.configuration.XMLConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.w3c.dom.DOMException; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.xml.sax.SAXException; -/** - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ -public class XMLNamespaceLoader extends Observable implements NamespaceLoader { - - private static Logger log = LoggerFactory.getLogger(XMLNamespaceLoader.class); - - public String filename; - public String path; - public int refresh; // refresh time in seconds before the configuration is - // checked for a change in parameters! - private XMLConfiguration config = null; - private final int delay = 1000; // delay for 5 sec. - private long period = -1; - private final Timer timer = new Timer(); - private XMLReloadingStrategy xmlStrategy; - private String namespaceFN = null; - private final String namespaceSchemaURL; - - public boolean schemaValidity = false; - - public XMLNamespaceLoader() { - - // Build the namespaceFileName - namespaceFN = getNamespaceFileName(); - namespaceSchemaURL = getNamespaceSchemaFileName(); - init(namespaceFN, refresh); - } - - public XMLNamespaceLoader(int refresh) { - - if (refresh < 0) { - this.refresh = 0; - } else { - this.refresh = refresh; - } - namespaceFN = getNamespaceFileName(); - namespaceSchemaURL = getNamespaceSchemaFileName(); - log.debug("Namespace XSD : {}", namespaceSchemaURL); - init(namespaceFN, refresh); - } - - public XMLNamespaceLoader(String filename) { - - this.filename = filename; - namespaceFN = getNamespaceFileName(); - namespaceSchemaURL = getNamespaceSchemaFileName(); - log.debug("Namespace XSD : {}", namespaceSchemaURL); - init(namespaceFN, refresh); - } - - public XMLNamespaceLoader(String path, String filename) { - - this.path = path; - this.filename = filename; - namespaceFN = getNamespaceFileName(); - namespaceSchemaURL = getNamespaceSchemaFileName(); - log.debug("Namespace XSD : {}", namespaceSchemaURL); - init(namespaceFN, refresh); - } - - public XMLNamespaceLoader(String path, String filename, int refresh) { - - if (refresh < 0) { - this.refresh = 0; - } else { - this.refresh = refresh; - } - this.path = path; - this.filename = filename; - 
namespaceFN = getNamespaceFileName(); - namespaceSchemaURL = getNamespaceSchemaFileName(); - log.debug("Namespace XSD : {}", namespaceSchemaURL); - init(namespaceFN, refresh); - } - - public void setObserver(Observer obs) { - - addObserver(obs); - } - - public void setNotifyManaged() { - - xmlStrategy.notifingPerformed(); - config.setReloadingStrategy(xmlStrategy); - } - - /** - * The setChanged() protected method must overridden to make it public - */ - @Override - public synchronized void setChanged() { - - super.setChanged(); - } - - private void init(String namespaceFileName, int refresh) { - - log.info("Reading Namespace configuration file {} and setting refresh rate to {} seconds.", namespaceFileName, refresh); - - // create reloading strategy for refresh - xmlStrategy = new XMLReloadingStrategy(); - period = 3000; // Conversion in millisec. - log.debug(" Refresh time is {} millisec", period); - xmlStrategy.setRefreshDelay(period); // Set to refresh sec the refreshing delay. - - namespaceFN = namespaceFileName; - - // specify the properties file and set the reloading strategy for that file - try { - config = new XMLConfiguration(); - config.setFileName(namespaceFileName); - - // Validation of Namespace.xml - log.debug(" ... CHECK of VALIDITY of NAMESPACE Configuration ..."); - - schemaValidity = XMLNamespaceLoader.checkValidity(namespaceSchemaURL, - namespaceFileName); - if (!(schemaValidity)) { - log.error("NAMESPACE IS NOT VALID IN RESPECT OF NAMESPACE SCHEMA! "); - throw new ConfigurationException("XML is not valid!"); - } else { - log.debug("Namespace is valid in respect of NAMESPACE SCHEMA."); - } - - // This will throw a ConfigurationException if the XML document does not - // conform to its DTD. 
- - config.setReloadingStrategy(xmlStrategy); - - Peeper peeper = new Peeper(this); - timer.schedule(peeper, delay, period); - - log.debug("Timer initialized"); - - config.load(); - log.debug("Namespace Configuration read!"); - - } catch (ConfigurationException cex) { - log.error("ATTENTION! Unable to load Namespace Configuration!", cex); - log.error(toString()); - } - - } - - private String getNamespaceFileName() { - - String configurationDir = it.grid.storm.config.Configuration.getInstance() - .configurationDir(); - // Looking for namespace configuration file - String namespaceFN = it.grid.storm.config.Configuration.getInstance() - .getNamespaceConfigFilename(); - // Build the filename - if (configurationDir.charAt(configurationDir.length() - 1) != separatorChar) { - configurationDir += Character.toString(separatorChar); - } - String namespaceAbsFN = configurationDir + namespaceFN; - // Check the namespace conf file accessibility - File nsFile = new File(namespaceAbsFN); - if (nsFile.exists()) { - log.debug("Found the namespace file : {}", namespaceAbsFN); - } else { - log.error("Unable to find the namespace file : {}", namespaceAbsFN); - } - return namespaceAbsFN; - } - - private String getNamespaceSchemaFileName() { - - String schemaName = it.grid.storm.config.Configuration.getInstance() - .getNamespaceSchemaFilename(); - - if ("Schema UNKNOWN!".equals(schemaName)) { - - schemaName = "namespace.xsd"; - String namespaceFN = getNamespaceFileName(); - File namespaceFile = new File(namespaceFN); - if (namespaceFile.exists()) { - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - try { - DocumentBuilder builder = factory.newDocumentBuilder(); - Document doc = builder.parse(namespaceFN); - Element rootElement = doc.getDocumentElement(); - String tagName = rootElement.getTagName(); - if ("namespace".equals(tagName)) { - if (rootElement.hasAttributes()) { - String value = rootElement - .getAttribute("xsi:noNamespaceSchemaLocation"); - if ((value 
!= null) && (value.length() > 0)) { - schemaName = value; - } - } else { - log.error("{} don't have a valid root element attributes", namespaceFN); - } - } else { - log.error("{} don't have a valid root element.", namespaceFN); - } - - } catch (ParserConfigurationException | SAXException | IOException e) { - log.error("Error while parsing {}: {}", namespaceFN, e.getMessage(), e); - } - } - } - - return schemaName; - - } - - public Configuration getConfiguration() { - - return config; - } - - private static boolean checkValidity(String namespaceSchemaURL, - String filename) { - - NamespaceValidator validator = new NamespaceValidator(); - return validator.validateSchema(namespaceSchemaURL, filename); - } - - /** - * - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ - private class Peeper extends TimerTask { - - private XMLReloadingStrategy reloadingStrategy; - - private boolean signal; - private final XMLNamespaceLoader observed; - - public Peeper(XMLNamespaceLoader obs) { - - observed = obs; - } - - @Override - public void run() { - - // log.debug(" The glange of peeper.."); - reloadingStrategy = (XMLReloadingStrategy) config.getReloadingStrategy(); - boolean changed = reloadingStrategy.reloadingRequired(); - if (changed) { - log.debug(" NAMESPACE CONFIGURATION is changed ! "); - log.debug(" ... CHECK of VALIDITY of NAMESPACE Configuration ..."); - boolean valid = XMLNamespaceLoader.checkValidity(namespaceSchemaURL, - namespaceFN); - if (!valid) { - log - .debug(" Namespace configuration is not reloaded.. Please rectify the error."); - schemaValidity = false; - reloadingStrategy.notifingPerformed(); - reloadingStrategy.reloadingPerformed(); - } else { - log - .debug(" ... NAMESPACE Configuration is VALID in respect of Schema Grammar."); - log.debug(" ----> RELOADING "); - - schemaValidity = true; - - boolean forceReloading = it.grid.storm.config.Configuration - .getInstance().getNamespaceAutomaticReloading(); - if (forceReloading) { - config.reload(); - } else { - log - .debug(" ----> RELOAD of namespace don't be executed because NO AUTOMATIC RELOAD is configured."); - } - reloadingStrategy.reloadingPerformed(); - } - } - - signal = reloadingStrategy.notifingRequired(); - if ((signal)) { - observed.setChanged(); - observed.notifyObservers(" MSG : Namespace is changed!"); - reloadingStrategy.notifingPerformed(); - } - - } +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.NamespaceValidator; +import it.grid.storm.namespace.config.NamespaceLoader; - } +public class XMLNamespaceLoader implements NamespaceLoader { + + private static Logger log = LoggerFactory.getLogger(XMLNamespaceLoader.class); + + private static final String ROOT_ELEMENT = 
"namespace"; + private static final String XML_SCHEMA_ATTRIBUTE = "xsi:noNamespaceSchemaLocation"; + + private final String namespaceAbsoluteFilePath; + private final String namespaceSchemaURL; + + private XMLConfiguration config; + + public XMLNamespaceLoader(String namespaceFilePath) + throws DOMException, ParserConfigurationException, SAXException, IOException, + NamespaceException, ConfigurationException { + + File namespace = new File(namespaceFilePath); + if (!namespace.exists()) { + throw new FileNotFoundException("Namespace file '" + namespaceFilePath + "' not found!"); + } + namespaceAbsoluteFilePath = namespaceFilePath; + namespaceSchemaURL = getNamespaceSchemaUrlFromNamespaceFile(); + log.debug("Namespace XSD : {}", namespaceSchemaURL); + + if (checkValidity(namespaceAbsoluteFilePath, namespaceSchemaURL)) { + log.debug("Namespace file '{}' is valid in respect of namespace schema '{}'.", + namespaceAbsoluteFilePath, namespaceSchemaURL); + } else { + String errorMessage = String.format("Namespace %s is NOT VALID in respect of %s schema.", + namespaceAbsoluteFilePath, namespaceSchemaURL); + log.error(errorMessage); + throw new NamespaceException(errorMessage); + } + + init(); + } + + private void init() throws ConfigurationException { + + config = new XMLConfiguration(); + config.setFileName(namespaceAbsoluteFilePath); + + log.debug("Timer initialized"); + + config.load(); + log.debug("Namespace Configuration read!"); + } + + private String getNamespaceSchemaUrlFromNamespaceFile() throws DOMException, + ParserConfigurationException, SAXException, IOException, NamespaceException { + + DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); + DocumentBuilder builder = factory.newDocumentBuilder(); + Document doc = builder.parse(namespaceAbsoluteFilePath); + Element rootElement = doc.getDocumentElement(); + String tagName = rootElement.getTagName(); + if (!ROOT_ELEMENT.equals(tagName)) { + String errorMessage = String.format("Invalid root for 
%s: 'namespace' element not found", + namespaceAbsoluteFilePath); + log.error(errorMessage); + throw new NamespaceException(errorMessage); + } + if (!rootElement.hasAttributes()) { + String errorMessage = + String.format("Invalid root for %s: no attributes found", namespaceAbsoluteFilePath); + log.error(errorMessage); + throw new NamespaceException(errorMessage); + } + String value = rootElement.getAttribute(XML_SCHEMA_ATTRIBUTE); + if (value == null || value.isEmpty()) { + String errorMessage = String.format("Invalid root for %s: attribute %s not found", + namespaceAbsoluteFilePath, XML_SCHEMA_ATTRIBUTE); + log.error(errorMessage); + throw new NamespaceException(errorMessage); + } + return value; + } + + public Configuration getConfiguration() { + + return config; + } + + private boolean checkValidity(String filename, String namespaceSchemaURL) { + + NamespaceValidator validator = new NamespaceValidator(); + return validator.validateSchema(namespaceSchemaURL, filename); + } } diff --git a/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceParser.java b/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceParser.java index bf8d10cd6..5c92d28c9 100644 --- a/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceParser.java +++ b/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceParser.java @@ -17,18 +17,32 @@ package it.grid.storm.namespace.config.xml; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import org.apache.commons.configuration.ConfigurationException; +import org.apache.commons.configuration.XMLConfiguration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; + import it.grid.storm.balancer.BalancingStrategyType; -import 
it.grid.storm.check.sanity.filesystem.SupportedFSType; import it.grid.storm.namespace.CapabilityInterface; import it.grid.storm.namespace.DefaultValuesInterface; -import it.grid.storm.namespace.NamespaceDirector; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.PropertyInterface; -import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.config.NamespaceCheck; import it.grid.storm.namespace.config.NamespaceLoader; import it.grid.storm.namespace.config.NamespaceParser; import it.grid.storm.namespace.model.ACLEntry; +import it.grid.storm.namespace.model.AccessLatency; import it.grid.storm.namespace.model.ApproachableRule; import it.grid.storm.namespace.model.Authority; import it.grid.storm.namespace.model.Capability; @@ -37,868 +51,676 @@ import it.grid.storm.namespace.model.PermissionException; import it.grid.storm.namespace.model.PoolMember; import it.grid.storm.namespace.model.Property; -import it.grid.storm.namespace.model.Property.SizeUnitType; import it.grid.storm.namespace.model.Protocol; import it.grid.storm.namespace.model.ProtocolPool; import it.grid.storm.namespace.model.Quota; import it.grid.storm.namespace.model.QuotaType; +import it.grid.storm.namespace.model.RetentionPolicy; import it.grid.storm.namespace.model.SAAuthzType; import it.grid.storm.namespace.model.StorageClassType; import it.grid.storm.namespace.model.SubjectRules; import it.grid.storm.namespace.model.TransportProtocol; import it.grid.storm.namespace.model.VirtualFS; -import it.grid.storm.space.SpaceHelper; -import it.grid.storm.space.gpfsquota.GPFSFilesetQuotaInfo; -import it.grid.storm.space.gpfsquota.GetGPFSFilesetQuotaInfoCommand; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.util.GPFSSizeHelper; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import 
java.util.Map.Entry; -import java.util.Observable; -import java.util.Observer; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -import org.apache.commons.configuration.ConfigurationException; -import org.apache.commons.configuration.XMLConfiguration; -import org.slf4j.Logger; - -/** - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ - -public class XMLNamespaceParser implements NamespaceParser, Observer { - - private final Logger log = NamespaceDirector.getLogger(); - - private String version; - private Map vfss; - private Map maprules; - private Map apprules; - - private XMLParserUtil parserUtil; - private final XMLConfiguration configuration; - private XMLNamespaceLoader xmlLoader; - - private final Lock refreshing = new ReentrantLock(); - - /** - * Constructor - * - * @param loader - * NamespaceLoader - */ - public XMLNamespaceParser(NamespaceLoader loader) { - configuration = (XMLConfiguration) loader.getConfiguration(); - if (loader instanceof XMLNamespaceLoader) { - xmlLoader = (XMLNamespaceLoader) loader; - xmlLoader.setObserver(this); - } else { - log.error("XMLParser initialized with a non-XML Loader"); - } - - parserUtil = new XMLParserUtil(configuration); - - for (Iterator iter = parserUtil.getKeys(); iter.hasNext();) { - log.debug("current item: {}", iter.next()); - } - - vfss = new HashMap<>(); - maprules = new HashMap<>(); - apprules = new HashMap<>(); - - boolean validNamespaceConfiguration = refreshCachedData(); - if (!validNamespaceConfiguration) { - log.error(" ???????????????????????????????????? "); - log.error(" ???? NAMESPACE does not VALID ???? "); - log.error(" ???????????????????????????????????? "); - log.error(" Please see the log. "); - System.exit(0); - } - - } - - public Map getVFSs() { - - return vfss; - } - - public Map getApproachableRules() { - - return apprules; - } - - public Map getMappingRules() { - - return maprules; - } - - public long getLastUpdateTime() { - - return 0L; - } - - public void update(Observable observed, Object arg) { - - log.debug("{} Refreshing Namespace Memory Cache .. 
", arg); - - XMLNamespaceLoader loader = (XMLNamespaceLoader) observed; - parserUtil = new XMLParserUtil(loader.getConfiguration()); - - if (loader.schemaValidity) { - refreshCachedData(); - } - - loader.setNotifyManaged(); - - log.debug(" ... Cache Refreshing ended"); - } - - /**************************************************************** - * PRIVATE METHODs - *****************************************************************/ - - private boolean refreshCachedData() { - - boolean result = false; - try { - refreshing.lock(); - configuration.clear(); - configuration.clearTree("filesystems"); - configuration.clearTree("mapping-rules"); - configuration.clearTree("approachable-rules"); - try { - configuration.load(); - log.debug(" ... reading and parsing the namespace configuration from file!"); - } catch (ConfigurationException ex) { - log.error(ex.getMessage(), ex); - } - log.debug("REFRESHING CACHE.."); - // Save the cache content - log.debug(" ..save the cache content before semantic check"); - Map vfssSAVED = vfss; - Map maprulesSAVED = maprules; - Map apprulesSAVED = apprules; - // Refresh the cache content with new values - - log.debug(" ..refresh the cache"); - refreshCache(); - - // Do the checking on Namespace - log.debug(" ..semantic check of namespace"); - NamespaceCheck checker = new NamespaceCheck(vfss, maprules, apprules); - boolean semanticCheck = checker.check(); - - // If there is an error restore old cache content - log.debug("REFRESHING ENDED."); - if (semanticCheck) { - log.debug("Namespace is semantically valid"); - result = true; - } else { - log - .warn("Namespace does not semantically valid!, so no load performed!"); - vfss = vfssSAVED; - maprules = maprulesSAVED; - apprules = apprulesSAVED; - result = false; - } - } finally { - refreshing.unlock(); - } - return result; - } - - private void refreshCache() { - - log - .info(" ############## REFRESHING NAMESPACE CONFIGURATION CACHE : start ###############"); - - /************************** - * 
Retrieve Version Number - *************************/ - try { - retrieveVersion(); - } catch (NamespaceException ex1) { - log - .warn( - "Namespace configuration does not contain a valid version number.", - ex1); - /** - * @todo Manage this exceptional status! - */ - } - - /************************** - * Building VIRTUAL FS - *************************/ - try { - buildVFSs(); - } catch (ClassNotFoundException ex) { - log - .error("Namespace Configuration ERROR in VFS-DRIVER specification", ex); - /** - * @todo Manage this exceptional status! - */ - } catch (NamespaceException ex) { - log - .error( - "Namespace Configuration ERROR in VFS definition, please check it.", - ex); - /** - * @todo Manage this exceptional status! - */ - } - - /************************** - * Building MAPPING RULES - *************************/ - try { - buildMapRules(); - } catch (NamespaceException ex1) { - log - .error( - "Namespace Configuration ERROR in MAPPING RULES definition, please check it.", - ex1); - /** - * @todo Manage this exceptional status! - */ - } - - /************************** - * Building APPROACHABLE RULES - *************************/ - try { - buildAppRules(); - } catch (NamespaceException ex2) { - log - .error( - "Namespace Configuration ERROR in APPROACHABLE RULES definition, please check it.", - ex2); - /** - * @todo Manage this exceptional status! 
- */ - } - log - .info(" ############## REFRESHING NAMESPACE CONFIGURATION CACHE : end ###############"); - - handleTotalOnlineSizeFromGPFSQuota(); - // Update SA within Reserved Space Catalog - updateSA(); - } - - private void handleTotalOnlineSizeFromGPFSQuota() { - - for (Entry entry : vfss.entrySet()) { - String storageAreaName = entry.getKey(); - VirtualFSInterface storageArea = entry.getValue(); - if (SupportedFSType.parseFS(storageArea.getFSType()) == SupportedFSType.GPFS) { - Quota quota = storageArea.getCapabilities().getQuota(); - if (quota != null && quota.getEnabled()) { - - GPFSFilesetQuotaInfo quotaInfo = getGPFSQuotaInfo(storageArea); - if (quotaInfo != null) { - updateTotalOnlineSizeFromGPFSQuota(storageAreaName, storageArea, - quotaInfo); - } - } - } - } - } - - private GPFSFilesetQuotaInfo getGPFSQuotaInfo(VirtualFSInterface storageArea) { - - GetGPFSFilesetQuotaInfoCommand cmd = new GetGPFSFilesetQuotaInfoCommand( - storageArea); - - try { - return cmd.call(); - } catch (Throwable t) { - log - .warn( - "Cannot get quota information out of GPFS. Using the TotalOnlineSize in namespace.xml " - + "for Storage Area {}. Reason: {}", storageArea.getAliasName(), - t.getMessage()); - return null; - } - } - - private void updateTotalOnlineSizeFromGPFSQuota(String storageAreaName, - VirtualFSInterface storageArea, GPFSFilesetQuotaInfo quotaInfo) { - - long gpfsTotalOnlineSize = GPFSSizeHelper.getBytesFromKIB(quotaInfo - .getBlockSoftLimit()); - Property newProperties = Property.from(storageArea.getProperties()); - try { - newProperties.setTotalOnlineSize(SizeUnitType.BYTE.getTypeName(), - gpfsTotalOnlineSize); - storageArea.setProperties(newProperties); - log.warn("TotalOnlineSize as specified in namespace.xml will be ignored " - + "since quota is enabled on the GPFS {} Storage Area.", - storageAreaName); - } catch (NamespaceException e) { - log - .warn( - "Cannot get quota information out of GPFS. 
Using the TotalOnlineSize in namespace.xml " - + "for Storage Area {}.", storageAreaName, e); - } - } - - // ******************* Update SA Catalog *************************** - private void updateSA() { - - TSpaceToken spaceToken = null; - // ReservedSpaceCatalog spaceCatalog = new ReservedSpaceCatalog(); - SpaceHelper spaceHelp = new SpaceHelper(); - log - .debug("Updating Space Catalog with Storage Area defined within NAMESPACE"); - VirtualFS vfs = null; - Iterator scan = vfss.values().iterator(); - while (scan.hasNext()) { - - vfs = (VirtualFS) scan.next(); - String vfsAliasName = vfs.getAliasName(); - log.debug(" Considering VFS : {}", vfsAliasName); - String aliasName = vfs.getSpaceTokenDescription(); - if (aliasName == null) { - // Found a VFS without the optional element Space Token Description - log.debug("XMLNamespaceParser.UpdateSA() : Found a VFS ('{}') without space-token-description. " - + "Skipping the Update of SA", vfsAliasName); - } else { - TSizeInBytes onlineSize = vfs.getProperties().getTotalOnlineSize(); - String spaceFileName = vfs.getRootPath(); - spaceToken = spaceHelp.createVOSA_Token(aliasName, onlineSize, - spaceFileName); - vfs.setSpaceToken(spaceToken); - - log.debug(" Updating SA ('{}'), token:'{}', onlineSize:'{}', spaceFileName:'{}'", - aliasName, spaceToken, onlineSize, spaceFileName); - } - - } - spaceHelp.purgeOldVOSA_token(); - log.debug("Updating Space Catalog... 
DONE!!"); - - } - - // ******************* VERSION NUMBER *************************** - private void retrieveVersion() throws NamespaceException { - - version = parserUtil.getNamespaceVersion(); - log.debug(" ==== NAMESPACE VERSION : '{}' ====", version); - } - - // ******************* VIRTUAL FS *************************** - - private void buildVFSs() throws ClassNotFoundException, NamespaceException { - - int nrOfVFS = 0; - - nrOfVFS = parserUtil.getNumberOfFS(); - // For each VFS within configuration build VFS class instance - VirtualFS vfs; - String spaceTokenDescription = null; - StorageClassType storageClass; - String root = null; - String name; - String fsType; - Class driver; - String storageAreaAuthz; - PropertyInterface prop; - CapabilityInterface cap; - DefaultValuesInterface defValues; - SAAuthzType saAuthzType; - - for (int i = 0; i < nrOfVFS; i++) { - // Building VFS - vfs = new VirtualFS(); - - name = parserUtil.getFSName(i); - vfs.setAliasName(name); - log.debug("VFS({}).name = '{}'", i, name); - - fsType = parserUtil.getFSType(name); - vfs.setFSType(fsType); - log.debug("VFS({}).fs_type = '{}'", name, fsType); - - spaceTokenDescription = parserUtil.getFSSpaceTokenDescription(name); - vfs.setSpaceTokenDescription(spaceTokenDescription); - log.debug("VFS({}).space-token-description = '{}'", name, spaceTokenDescription); - - storageClass = StorageClassType.getStorageClassType(parserUtil - .getStorageClass(name)); - vfs.setStorageClassType(storageClass); - log.debug("VFS({}).storage-class = '{}'", name, storageClass); - - root = parserUtil.getFSRoot(name); - vfs.setRoot(root); - log.debug("VFS({}).root = '{}'", name, root); - - driver = Class.forName(parserUtil.getFSDriver(name)); - vfs.setFSDriver(driver); - log.debug("VFS({}).fsDriver [CLASS Name] = '{}'", name, driver.getName()); - - driver = Class.forName(parserUtil.getSpaceDriver(name)); - vfs.setSpaceSystemDriver(driver); - log.debug("VFS({}).spaceDriver [CLASS Name] = '{}'", name, 
driver.getName()); - - saAuthzType = parserUtil.getStorageAreaAuthzType(name); - vfs.setSAAuthzType(saAuthzType); - log.debug("VFS({}).storage-area-authz.TYPE = '{}'", name, saAuthzType); - - storageAreaAuthz = parserUtil.getStorageAreaAuthz(name, saAuthzType); - vfs.setSAAuthzSource(storageAreaAuthz); - log.debug("VFS({}).storage-area-authz = '{}'", name, storageAreaAuthz); - - prop = buildProperties(name); - vfs.setProperties(prop); - - cap = buildCapabilities(name); - vfs.setCapabilities(cap); - - defValues = buildDefaultValues(name); - vfs.setDefaultValues(defValues); - - // Adding VFS - synchronized (this) { - vfss.remove(name); - vfss.put(name, vfs); - } - } - } - - // ******************* PROPERTY *************************** - private PropertyInterface buildProperties(String fsName) - throws NamespaceException { - - Property prop = new Property(); - - String accessLatency = parserUtil.getAccessLatencyType(fsName); - prop.setAccessLatency(accessLatency); - log.debug("VFS({}).Properties.AccessLatency = '{}'", fsName, accessLatency); - - String expirationMode = parserUtil.getExpirationModeType(fsName); - prop.setExpirationMode(expirationMode); - log.debug("VFS({}).Properties.ExpirationMode = '{}'", fsName, expirationMode); - - String retentionPolicy = parserUtil.getRetentionPolicyType(fsName); - prop.setRetentionPolicy(retentionPolicy); - log.debug("VFS({}).Properties.RetentionPolicy = '{}'", fsName, retentionPolicy); - - String unitType = parserUtil.getNearlineSpaceUnitType(fsName); - long nearLineSize = parserUtil.getNearlineSpaceSize(fsName); - prop.setTotalNearlineSize(unitType, nearLineSize); - log.debug("VFS({}).Properties.NearlineSpaceSize = '{} {}'", fsName, nearLineSize, unitType); - - unitType = parserUtil.getOnlineSpaceUnitType(fsName); - long onlineSize = parserUtil.getOnlineSpaceSize(fsName); - prop.setTotalOnlineSize(unitType, onlineSize); - log.debug("VFS({}).Properties.OnlineSpaceSize = '{} {}'", fsName, onlineSize, unitType); - - boolean 
hasLimitedSize = parserUtil.getOnlineSpaceLimitedSize(fsName); - prop.setLimitedSize(hasLimitedSize); - log.debug("VFS({}).Properties.OnlineSpaceLimitedSize = '{}'", fsName, hasLimitedSize); - - return prop; - } - - // ******************* CAPABILITY *************************** - - private CapabilityInterface buildCapabilities(String fsName) - throws NamespaceException { - - /** - * ACL MODE ELEMENT - */ - String aclMode = parserUtil.getACLMode(fsName); - Capability cap = new Capability(aclMode); - log.debug("VFS({}).Capabilities.aclMode = '{}'", fsName, aclMode); - - /** - * DEFAULT ACL - */ - boolean defaultACLDefined = parserUtil.getDefaultACLDefined(fsName); - log.debug("VFS({}).Capabilities.defaultACL [Defined?] = {}", fsName, defaultACLDefined); - if (defaultACLDefined) { - int nrACLEntries = parserUtil.getNumberOfACL(fsName); - String groupName = null; - String filePermString = null; - ACLEntry aclEntry = null; - for (int entryNumber = 0; entryNumber < nrACLEntries; entryNumber++) { - groupName = parserUtil.getGroupName(fsName, entryNumber); - filePermString = parserUtil.getPermissionString(fsName, entryNumber); - try { - aclEntry = new ACLEntry(groupName, filePermString); - cap.addACLEntry(aclEntry); - } catch (PermissionException permEx) { - log.error("Namespace XML Parser -- ERROR -- : {}", permEx.getMessage()); - } - } - log.debug("VFS({}).Capabilities.defaultACL = {}", fsName, cap.getDefaultACL()); - } - - /** - * QUOTA ELEMENT - */ - boolean quotaDefined = parserUtil.getQuotaDefined(fsName); - Quota quota = null; - if (quotaDefined) { - boolean quotaEnabled = parserUtil.getQuotaEnabled(fsName); - String device = parserUtil.getQuotaDevice(fsName); - - QuotaType quotaType; - String quotaValue = null; - - if (parserUtil.getQuotaFilesetDefined(fsName)) { - quotaType = QuotaType.buildQuotaType(QuotaType.FILESET); - quotaValue = parserUtil.getQuotaFileset(fsName); - } else { - if (parserUtil.getQuotaGroupIDDefined(fsName)) { - quotaType = 
QuotaType.buildQuotaType(QuotaType.GRP); - quotaValue = parserUtil.getQuotaGroupID(fsName); - } else { - if (parserUtil.getQuotaUserIDDefined(fsName)) { - quotaType = QuotaType.buildQuotaType(QuotaType.USR); - quotaValue = parserUtil.getQuotaUserID(fsName); - } else { - quotaType = QuotaType.buildQuotaType(QuotaType.UNKNOWN); - quotaValue = "unknown"; - } - } - } - - quotaType.setValue(quotaValue); - quota = new Quota(quotaEnabled, device, quotaType); - - } else { - quota = new Quota(); - } - cap.setQuota(quota); - - log.debug("VFS({}).Capabilities.quota = '{}'", fsName, quota); - - /** - * TRANSFER PROTOCOL - */ - int nrProtocols = parserUtil.getNumberOfProt(fsName); - Protocol protocol; - Authority service; - TransportProtocol transportProt; - int protocolIndex; - String serviceHostName; - String servicePortValue; - String schema; - String name; - for (int protCounter = 0; protCounter < nrProtocols; protCounter++) { - protocolIndex = parserUtil.getProtId(fsName, protCounter); - name = parserUtil.getProtName(fsName, protCounter); - schema = parserUtil.getProtSchema(fsName, protCounter); - protocol = Protocol.getProtocol(schema); - protocol.setProtocolServiceName(name); - serviceHostName = parserUtil.getProtHost(fsName, protCounter); - servicePortValue = parserUtil.getProtPort(fsName, protCounter); - int portIntValue = -1; - service = null; - if (servicePortValue != null) { - try { - portIntValue = Integer.parseInt(servicePortValue); - service = new Authority(serviceHostName, portIntValue); - // log.debug("SERVICE PORT: "+service); - } catch (NumberFormatException nfe) { - log - .warn("to evaluate the environmental variable " + servicePortValue); - } - } else { - service = new Authority(serviceHostName); - // log.debug("SERVICE : "+service); - } - transportProt = new TransportProtocol(protocol, service); - transportProt.setProtocolID(protocolIndex); // 1.4.0 - log.debug("VFS({}).Capabilities.protocol({}) = '{}'", fsName, protCounter, transportProt); - 
cap.addTransportProtocolByScheme(protocol, transportProt); - cap.addTransportProtocol(transportProt); - if (protocolIndex != -1) { - cap.addTransportProtocolByID(protocolIndex, transportProt); - } - - } - - /** - * PROTOCOL POOL - */ - int nrPools = parserUtil.getNumberOfPool(fsName); - if (nrPools > 0) { - - for (int poolCounter = 0; poolCounter < nrPools; poolCounter++) { - BalancingStrategyType balanceStrategy = BalancingStrategyType - .getByValue(parserUtil.getBalancerStrategy(fsName, poolCounter)); - ArrayList poolMembers = new ArrayList<>(); - int nrMembers = parserUtil.getNumberOfPoolMembers(fsName, poolCounter); - for (int i = 0; i < nrMembers; i++) { - int protIndex = parserUtil.getMemberID(fsName, poolCounter, i); - TransportProtocol tProtMember = cap.getProtocolByID(protIndex); - if (tProtMember != null) { - PoolMember poolMember; - if (balanceStrategy.requireWeight()) { - int memberWeight = parserUtil.getMemberWeight(fsName, - poolCounter, i); - poolMember = new PoolMember(protIndex, tProtMember, memberWeight); - } else { - poolMember = new PoolMember(protIndex, tProtMember); - } - poolMembers.add(poolMember); - } else { // member pointed out doesn't exist!! 
- String errorMessage = String.format("POOL Building: Protocol with index %d does not exists in the VFS : %s", protIndex, fsName); - log.error(errorMessage); - throw new NamespaceException(errorMessage); - } - } - Protocol pooProtocol = poolMembers.get(0).getMemberProtocol() - .getProtocol(); - verifyPoolIsValid(poolMembers); - log.debug("Defined pool for protocol {} with size {}", pooProtocol, poolMembers.size()); - cap.addProtocolPoolBySchema(pooProtocol, new ProtocolPool( - balanceStrategy, poolMembers)); - log.debug("PROTOCOL POOL: {}", cap.getPoolByScheme(pooProtocol)); - } - } else { - log.debug("Pool is not defined in VFS {}", fsName); - } - - return cap; - } - - /** - * @param poolMembers - * @throws NamespaceException - */ - private void verifyPoolIsValid(ArrayList poolMembers) - throws NamespaceException { - - if (poolMembers.isEmpty()) { - throw new NamespaceException("POOL Defined is EMPTY!"); - } - Protocol prot = poolMembers.get(0).getMemberProtocol().getProtocol(); - for (PoolMember member : poolMembers) { - if (!(member.getMemberProtocol().getProtocol().equals(prot))) { - throw new NamespaceException( - "Defined Pool is NOT HOMOGENEOUS! Protocols " + prot.toString() - + " and " + member.toString() + " differs"); - } - } - } - - // ******************* DEFAULT VALUES *************************** - - private DefaultValuesInterface buildDefaultValues(String fsName) - throws NamespaceException { - - DefaultValues def = new DefaultValues(); - if (parserUtil.isDefaultElementPresent(fsName)) { - setSpaceDef(fsName, def); - setFileDef(fsName, def); - } else { // Produce Default Values with default values :o ! - log.debug("VFS({}).DefaultValues is ABSENT. 
Using DEFAULT values.", fsName); - } - return def; - } - - private void setSpaceDef(String fsName, DefaultValues def) - throws NamespaceException { - - String spaceType = parserUtil.getDefaultSpaceType(fsName); - log.debug("VFS({}).DefaultValues.space.type = '{}'", fsName, spaceType); - long lifeTime = parserUtil.getDefaultSpaceLifeTime(fsName); - log.debug("VFS({}).DefaultValues.space.lifeTime = ''", fsName, lifeTime); - long guarSize = parserUtil.getDefaultSpaceGuarSize(fsName); - log.debug("VFS({}).DefaultValues.space.guarSize = '{}'", fsName, guarSize); - long totSize = parserUtil.getDefaultSpaceTotSize(fsName); - log.debug("VFS({}).DefaultValues.space.totSize = '{}'", fsName, totSize); - def.setSpaceDefaults(spaceType, lifeTime, guarSize, totSize); - } - - private void setFileDef(String fsName, DefaultValues def) - throws NamespaceException { - - String fileType = parserUtil.getDefaultFileType(fsName); - log.debug("VFS({}).DefaultValues.file.type = '{}'", fsName, fileType); - long lifeTime = parserUtil.getDefaultFileLifeTime(fsName); - log.debug("VFS({}).DefaultValues.file.lifeTime = '{}'", fsName, lifeTime); - def.setFileDefaults(fileType, lifeTime); - } - - // ******************* MAPPING RULE *************************** - - private void buildMapRules() throws NamespaceException { - - int numOfMapRules = parserUtil.getNumberOfMappingRule(); - String ruleName; - String stfnRoot; - String mappedFS; - MappingRule mapRule; - - for (int i = 0; i < numOfMapRules; i++) { - ruleName = parserUtil.getMapRuleName(i); - mappedFS = parserUtil.getMapRule_mappedFS(ruleName); - // Adding mapping rule to VFS within vfss; - if (vfss.containsKey(mappedFS)) { - log.debug("VFS '{}' pointed by RULE : '{}' exists.", mappedFS, ruleName); - stfnRoot = parserUtil.getMapRule_StFNRoot(ruleName); - VirtualFSInterface vfs = vfss.get(mappedFS); - mapRule = new MappingRule(ruleName, stfnRoot, vfs); - ((VirtualFS) vfs).addMappingRule(mapRule); - maprules.put(ruleName, mapRule); - } else { - 
log.error("VFS '{}' pointed by RULE : '{}' DOES NOT EXISTS.", mappedFS, ruleName); - } - } - } - - // ******************* APPROACHABLE RULE *************************** - - private void buildAppRules() throws NamespaceException { - - int numOfAppRules = parserUtil.getNumberOfApproachRule(); - - String ruleName; - String dn; - String vo_name; - String relPath; - String anonymousHttpReadString; - List appFSList; - ApproachableRule appRule; - - log.debug("Number of APP Rule : {}", numOfAppRules); - - - for (int i = 0; i < numOfAppRules; i++) { - ruleName = parserUtil.getApproachRuleName(i); - log.debug(" APP rule nr: {} is named : {}", i, ruleName); - - dn = parserUtil.getAppRule_SubjectDN(ruleName); - vo_name = parserUtil.getAppRule_SubjectVO(ruleName); - SubjectRules subjectRules = new SubjectRules(dn, vo_name); - - relPath = parserUtil.getAppRule_RelativePath(ruleName); - - anonymousHttpReadString = parserUtil - .getAppRule_AnonymousHttpRead(ruleName); - if (anonymousHttpReadString != null - && !anonymousHttpReadString.trim().isEmpty()) { - appRule = new ApproachableRule(ruleName, subjectRules, relPath, - Boolean.parseBoolean(anonymousHttpReadString)); - } else { - appRule = new ApproachableRule(ruleName, subjectRules, relPath); - } - - appFSList = parserUtil.getAppRule_AppFS(ruleName); - for (String appFS : appFSList) { - if (vfss.containsKey(appFS)) { - log.debug("VFS '{}' pointed by RULE : '{}' exists.", appFS, ruleName); - VirtualFSInterface vfs = vfss.get(appFS); - ((VirtualFS) vfs).addApproachableRule(appRule); - appRule.addApproachableVFS(vfs); - } else { - log.error("VFS '{}' pointed by RULE : '{}' DOES NOT EXISTS.", appFS, ruleName); - } - } - apprules.put(ruleName, appRule); - } - } - - /***************************************************************************** - * BUSINESS METHODs - ****************************************************************************/ - - public String getNamespaceVersion() { - - return version; - } - - public List 
getAllVFS_Roots() { - - Collection elem = vfss.values(); - List roots = new ArrayList<>(vfss.size()); - Iterator scan = elem.iterator(); - while (scan.hasNext()) { - String root = null; - root = scan.next().getRootPath(); - roots.add(root); - } - return roots; - } - - public Map getMapVFS_Root() { - - Map result = new HashMap<>(); - Collection elem = vfss.values(); - Iterator scan = elem.iterator(); - while (scan.hasNext()) { - String root = null; - VirtualFSInterface vfs = scan.next(); - root = vfs.getRootPath(); - result.put(root, vfs); - } - return result; - } - - public List getAllMappingRule_StFNRoots() { - - Collection elem = maprules.values(); - List roots = new ArrayList<>(maprules.size()); - Iterator scan = elem.iterator(); - String root = null; - while (scan.hasNext()) { - root = scan.next().getStFNRoot(); - roots.add(root); - } - return roots; - } - - public Map getMappingRuleMAP() { - - Map map = new HashMap<>(); - Collection elem = maprules.values(); - Iterator scan = elem.iterator(); - String root = null; - String name = null; - MappingRule rule; - while (scan.hasNext()) { - rule = scan.next(); - root = rule.getStFNRoot(); - name = rule.getRuleName(); - map.put(name, root); - } - return map; - } - - public VirtualFSInterface getVFS(String vfsName) { - - return vfss.get(vfsName); - } +public class XMLNamespaceParser implements NamespaceParser { + + private final Logger log = LoggerFactory.getLogger(XMLNamespaceParser.class); + + private String version; + private Map vfss; + private Map maprules; + private Map apprules; + + private XMLParserUtil parserUtil; + private final XMLConfiguration configuration; + + private final Lock refreshing = new ReentrantLock(); + + /** + * Constructor + * + * @param loader NamespaceLoader + */ + public XMLNamespaceParser(NamespaceLoader loader, boolean semanticCheckEnabled) { + + configuration = (XMLConfiguration) loader.getConfiguration(); + + parserUtil = new XMLParserUtil(configuration); + + for (Iterator iter = 
parserUtil.getKeys(); iter.hasNext();) { + log.debug("current item: {}", iter.next()); + } + + vfss = Maps.newHashMap(); + maprules = Maps.newHashMap(); + apprules = Maps.newHashMap(); + + boolean validNamespaceConfiguration = refreshCachedData(semanticCheckEnabled); + if (!validNamespaceConfiguration) { + log.error(" ???????????????????????????????????? "); + log.error(" ???? NAMESPACE is NOT VALID ???? "); + log.error(" ???????????????????????????????????? "); + log.error(" Please see the log. "); + System.exit(0); + } + + } + + public Map getVFSs() { + + return vfss; + } + + public Map getApproachableRules() { + + return apprules; + } + + public Map getMappingRules() { + + return maprules; + } + + /**************************************************************** + * PRIVATE METHODs + *****************************************************************/ + + private boolean refreshCachedData(boolean semanticCheckEnabled) { + + boolean result = false; + try { + refreshing.lock(); + configuration.clear(); + configuration.clearTree("filesystems"); + configuration.clearTree("mapping-rules"); + configuration.clearTree("approachable-rules"); + try { + configuration.load(); + log.debug(" ... 
reading and parsing the namespace configuration from file!"); + } catch (ConfigurationException ex) { + log.error(ex.getMessage(), ex); + } + log.debug("REFRESHING CACHE.."); + // Save the cache content + log.debug(" ..save the cache content before semantic check"); + Map vfssSAVED = vfss; + Map maprulesSAVED = maprules; + Map apprulesSAVED = apprules; + // Refresh the cache content with new values + + log.debug(" ..refresh the cache"); + refreshCache(); + + if (semanticCheckEnabled) { + // Do the checking on Namespace + log.debug(" ..semantic check of namespace"); + NamespaceCheck checker = new NamespaceCheck(vfss, maprules, apprules); + boolean semanticCheck = checker.check(); + if (semanticCheck) { + log.debug("Namespace is semantically valid"); + result = true; + } else { + log.warn("Namespace is not semantically valid, so no load performed!"); + vfss = vfssSAVED; + maprules = maprulesSAVED; + apprules = apprulesSAVED; + result = false; + } + } else { + result = true; + } + + log.debug("REFRESHING ENDED."); + + } finally { + refreshing.unlock(); + } + return result; + } + + private void refreshCache() { + + log.debug(" ############## REFRESHING NAMESPACE CONFIGURATION CACHE : start ###############"); + + /************************** + * Retrieve Version Number + *************************/ + try { + retrieveVersion(); + } catch (NamespaceException ex1) { + log.warn("Namespace configuration does not contain a valid version number.", ex1); + /** + * @todo Manage this exceptional status! + */ + } + + /************************** + * Building VIRTUAL FS + *************************/ + try { + buildVFSs(); + } catch (ClassNotFoundException ex) { + log.error("Namespace Configuration ERROR in VFS-DRIVER specification", ex); + /** + * @todo Manage this exceptional status! + */ + } catch (NamespaceException ex) { + log.error("Namespace Configuration ERROR in VFS definition, please check it.", ex); + /** + * @todo Manage this exceptional status! 
+ */ + } + + /************************** + * Building MAPPING RULES + *************************/ + try { + buildMapRules(); + } catch (NamespaceException ex1) { + log.error("Namespace Configuration ERROR in MAPPING RULES definition, please check it.", ex1); + /** + * @todo Manage this exceptional status! + */ + } + + /************************** + * Building APPROACHABLE RULES + *************************/ + try { + buildAppRules(); + } catch (NamespaceException ex2) { + log.error("Namespace Configuration ERROR in APPROACHABLE RULES definition, please check it.", + ex2); + /** + * @todo Manage this exceptional status! + */ + } + log.debug(" ############## REFRESHING NAMESPACE CONFIGURATION CACHE : end ###############"); + + } + + // ******************* VERSION NUMBER *************************** + private void retrieveVersion() throws NamespaceException { + + version = parserUtil.getNamespaceVersion(); + log.debug(" ==== NAMESPACE VERSION : '{}' ====", version); + } + + // ******************* VIRTUAL FS *************************** + + private void buildVFSs() throws ClassNotFoundException, NamespaceException { + + int nrOfVFS = 0; + + nrOfVFS = parserUtil.getNumberOfFS(); + // For each VFS within configuration build VFS class instance + VirtualFS vfs; + String spaceTokenDescription = null; + StorageClassType storageClass; + String root = null; + String name; + String fsType; + Class driver; + String storageAreaAuthz; + PropertyInterface prop; + CapabilityInterface cap; + DefaultValuesInterface defValues; + SAAuthzType saAuthzType; + + for (int i = 0; i < nrOfVFS; i++) { + // Building VFS + vfs = new VirtualFS(); + + name = parserUtil.getFSName(i); + vfs.setAliasName(name); + log.debug("VFS({}).name = '{}'", i, name); + + fsType = parserUtil.getFSType(name); + vfs.setFSType(fsType); + log.debug("VFS({}).fs_type = '{}'", name, fsType); + + spaceTokenDescription = parserUtil.getFSSpaceTokenDescription(name); + vfs.setSpaceTokenDescription(spaceTokenDescription); + 
log.debug("VFS({}).space-token-description = '{}'", name, spaceTokenDescription); + + storageClass = StorageClassType.valueOf(parserUtil.getStorageClass(name)); + vfs.setStorageClassType(storageClass); + log.debug("VFS({}).storage-class = '{}'", name, storageClass); + + root = parserUtil.getFSRoot(name); + vfs.setRoot(root); + log.debug("VFS({}).root = '{}'", name, root); + + driver = Class.forName(parserUtil.getFSDriver(name)); + vfs.setFSDriver(driver); + log.debug("VFS({}).fsDriver [CLASS Name] = '{}'", name, driver.getName()); + + driver = Class.forName(parserUtil.getSpaceDriver(name)); + vfs.setSpaceSystemDriver(driver); + log.debug("VFS({}).spaceDriver [CLASS Name] = '{}'", name, driver.getName()); + + saAuthzType = parserUtil.getStorageAreaAuthzType(name); + vfs.setSAAuthzType(saAuthzType); + log.debug("VFS({}).storage-area-authz.TYPE = '{}'", name, saAuthzType); + + storageAreaAuthz = parserUtil.getStorageAreaAuthz(name, saAuthzType); + vfs.setSAAuthzSource(storageAreaAuthz); + log.debug("VFS({}).storage-area-authz = '{}'", name, storageAreaAuthz); + + prop = buildProperties(name); + vfs.setProperties(prop); + + cap = buildCapabilities(name); + vfs.setCapabilities(cap); + + defValues = buildDefaultValues(name); + vfs.setDefaultValues(defValues); + + // Adding VFS + synchronized (this) { + vfss.remove(name); + vfss.put(name, vfs); + } + } + } + + // ******************* PROPERTY *************************** + private PropertyInterface buildProperties(String fsName) throws NamespaceException { + + Property prop = new Property(); + + String accessLatency = parserUtil.getAccessLatencyType(fsName); + prop.setAccessLatency(AccessLatency.valueOf(accessLatency)); + log.debug("VFS({}).Properties.AccessLatency = '{}'", fsName, accessLatency); + + String expirationMode = parserUtil.getExpirationModeType(fsName); + prop.setExpirationMode(expirationMode); + log.debug("VFS({}).Properties.ExpirationMode = '{}'", fsName, expirationMode); + + String retentionPolicy = 
parserUtil.getRetentionPolicyType(fsName); + prop.setRetentionPolicy(RetentionPolicy.valueOf(retentionPolicy)); + log.debug("VFS({}).Properties.RetentionPolicy = '{}'", fsName, retentionPolicy); + + String unitType = parserUtil.getNearlineSpaceUnitType(fsName); + long nearLineSize = parserUtil.getNearlineSpaceSize(fsName); + prop.setTotalNearlineSize(unitType, nearLineSize); + log.debug("VFS({}).Properties.NearlineSpaceSize = '{} {}'", fsName, nearLineSize, unitType); + + unitType = parserUtil.getOnlineSpaceUnitType(fsName); + long onlineSize = parserUtil.getOnlineSpaceSize(fsName); + prop.setTotalOnlineSize(unitType, onlineSize); + log.debug("VFS({}).Properties.OnlineSpaceSize = '{} {}'", fsName, onlineSize, unitType); + + boolean hasLimitedSize = parserUtil.getOnlineSpaceLimitedSize(fsName); + prop.setLimitedSize(hasLimitedSize); + log.debug("VFS({}).Properties.OnlineSpaceLimitedSize = '{}'", fsName, hasLimitedSize); + + return prop; + } + + // ******************* CAPABILITY *************************** + + private CapabilityInterface buildCapabilities(String fsName) throws NamespaceException { + + /** + * ACL MODE ELEMENT + */ + String aclMode = parserUtil.getACLMode(fsName); + Capability cap = new Capability(aclMode); + log.debug("VFS({}).Capabilities.aclMode = '{}'", fsName, aclMode); + + /** + * DEFAULT ACL + */ + boolean defaultACLDefined = parserUtil.getDefaultACLDefined(fsName); + log.debug("VFS({}).Capabilities.defaultACL [Defined?] 
= {}", fsName, defaultACLDefined); + if (defaultACLDefined) { + int nrACLEntries = parserUtil.getNumberOfACL(fsName); + String groupName = null; + String filePermString = null; + ACLEntry aclEntry = null; + for (int entryNumber = 0; entryNumber < nrACLEntries; entryNumber++) { + groupName = parserUtil.getGroupName(fsName, entryNumber); + filePermString = parserUtil.getPermissionString(fsName, entryNumber); + try { + aclEntry = new ACLEntry(groupName, filePermString); + cap.addACLEntry(aclEntry); + } catch (PermissionException permEx) { + log.error("Namespace XML Parser -- ERROR -- : {}", permEx.getMessage()); + } + } + log.debug("VFS({}).Capabilities.defaultACL = {}", fsName, cap.getDefaultACL()); + } + + /** + * QUOTA ELEMENT + */ + boolean quotaDefined = parserUtil.getQuotaDefined(fsName); + Quota quota = null; + if (quotaDefined) { + boolean quotaEnabled = parserUtil.getQuotaEnabled(fsName); + String device = parserUtil.getQuotaDevice(fsName); + + QuotaType quotaType; + String quotaValue = null; + + if (parserUtil.getQuotaFilesetDefined(fsName)) { + quotaType = QuotaType.buildQuotaType(QuotaType.FILESET); + quotaValue = parserUtil.getQuotaFileset(fsName); + } else { + if (parserUtil.getQuotaGroupIDDefined(fsName)) { + quotaType = QuotaType.buildQuotaType(QuotaType.GRP); + quotaValue = parserUtil.getQuotaGroupID(fsName); + } else { + if (parserUtil.getQuotaUserIDDefined(fsName)) { + quotaType = QuotaType.buildQuotaType(QuotaType.USR); + quotaValue = parserUtil.getQuotaUserID(fsName); + } else { + quotaType = QuotaType.buildQuotaType(QuotaType.UNKNOWN); + quotaValue = "unknown"; + } + } + } + + quotaType.setValue(quotaValue); + quota = new Quota(quotaEnabled, device, quotaType); + + } else { + quota = new Quota(); + } + cap.setQuota(quota); + + log.debug("VFS({}).Capabilities.quota = '{}'", fsName, quota); + + /** + * TRANSFER PROTOCOL + */ + int nrProtocols = parserUtil.getNumberOfProt(fsName); + Protocol protocol; + Authority service; + TransportProtocol 
transportProt; + int protocolIndex; + String serviceHostName; + String servicePortValue; + String schema; + String name; + for (int protCounter = 0; protCounter < nrProtocols; protCounter++) { + protocolIndex = parserUtil.getProtId(fsName, protCounter); + name = parserUtil.getProtName(fsName, protCounter); + schema = parserUtil.getProtSchema(fsName, protCounter); + protocol = Protocol.getProtocol(schema); + protocol.setProtocolServiceName(name); + serviceHostName = parserUtil.getProtHost(fsName, protCounter); + servicePortValue = parserUtil.getProtPort(fsName, protCounter); + int portIntValue = -1; + service = null; + if (servicePortValue != null) { + try { + portIntValue = Integer.parseInt(servicePortValue); + service = new Authority(serviceHostName, portIntValue); + // log.debug("SERVICE PORT: "+service); + } catch (NumberFormatException nfe) { + log.warn("to evaluate the environmental variable " + servicePortValue); + } + } else { + service = new Authority(serviceHostName); + // log.debug("SERVICE : "+service); + } + transportProt = new TransportProtocol(protocol, service); + transportProt.setProtocolID(protocolIndex); // 1.4.0 + log.debug("VFS({}).Capabilities.protocol({}) = '{}'", fsName, protCounter, transportProt); + cap.addTransportProtocolByScheme(protocol, transportProt); + cap.addTransportProtocol(transportProt); + if (protocolIndex != -1) { + cap.addTransportProtocolByID(protocolIndex, transportProt); + } + + } + + /** + * PROTOCOL POOL + */ + int nrPools = parserUtil.getNumberOfPool(fsName); + if (nrPools > 0) { + + for (int poolCounter = 0; poolCounter < nrPools; poolCounter++) { + BalancingStrategyType balanceStrategy = + BalancingStrategyType.getByValue(parserUtil.getBalancerStrategy(fsName, poolCounter)); + ArrayList poolMembers = new ArrayList<>(); + int nrMembers = parserUtil.getNumberOfPoolMembers(fsName, poolCounter); + for (int i = 0; i < nrMembers; i++) { + int protIndex = parserUtil.getMemberID(fsName, poolCounter, i); + TransportProtocol 
tProtMember = cap.getProtocolByID(protIndex); + if (tProtMember != null) { + PoolMember poolMember; + if (balanceStrategy.requireWeight()) { + int memberWeight = parserUtil.getMemberWeight(fsName, poolCounter, i); + poolMember = new PoolMember(protIndex, tProtMember, memberWeight); + } else { + poolMember = new PoolMember(protIndex, tProtMember); + } + poolMembers.add(poolMember); + } else { // member pointed out doesn't exist!! + String errorMessage = String.format( + "POOL Building: Protocol with index %d does not exists in the VFS : %s", protIndex, + fsName); + log.error(errorMessage); + throw new NamespaceException(errorMessage); + } + } + Protocol pooProtocol = poolMembers.get(0).getMemberProtocol().getProtocol(); + verifyPoolIsValid(poolMembers); + log.debug("Defined pool for protocol {} with size {}", pooProtocol, poolMembers.size()); + cap.addProtocolPoolBySchema(pooProtocol, new ProtocolPool(balanceStrategy, poolMembers)); + log.debug("PROTOCOL POOL: {}", cap.getPoolByScheme(pooProtocol)); + } + } else { + log.debug("Pool is not defined in VFS {}", fsName); + } + + return cap; + } + + /** + * @param poolMembers + * @throws NamespaceException + */ + private void verifyPoolIsValid(ArrayList poolMembers) throws NamespaceException { + + if (poolMembers.isEmpty()) { + throw new NamespaceException("POOL Defined is EMPTY!"); + } + Protocol prot = poolMembers.get(0).getMemberProtocol().getProtocol(); + for (PoolMember member : poolMembers) { + if (!(member.getMemberProtocol().getProtocol().equals(prot))) { + throw new NamespaceException("Defined Pool is NOT HOMOGENEOUS! 
Protocols " + prot.toString() + + " and " + member.toString() + " differs"); + } + } + } + + // ******************* DEFAULT VALUES *************************** + + private DefaultValuesInterface buildDefaultValues(String fsName) throws NamespaceException { + + DefaultValues def = new DefaultValues(); + if (parserUtil.isDefaultElementPresent(fsName)) { + setSpaceDef(fsName, def); + setFileDef(fsName, def); + } else { // Produce Default Values with default values :o ! + log.debug("VFS({}).DefaultValues is ABSENT. Using DEFAULT values.", fsName); + } + return def; + } + + private void setSpaceDef(String fsName, DefaultValues def) throws NamespaceException { + + String spaceType = parserUtil.getDefaultSpaceType(fsName); + log.debug("VFS({}).DefaultValues.space.type = '{}'", fsName, spaceType); + long lifeTime = parserUtil.getDefaultSpaceLifeTime(fsName); + log.debug("VFS({}).DefaultValues.space.lifeTime = '{}'", fsName, lifeTime); + long guarSize = parserUtil.getDefaultSpaceGuarSize(fsName); + log.debug("VFS({}).DefaultValues.space.guarSize = '{}'", fsName, guarSize); + long totSize = parserUtil.getDefaultSpaceTotSize(fsName); + log.debug("VFS({}).DefaultValues.space.totSize = '{}'", fsName, totSize); + def.setSpaceDefaults(spaceType, lifeTime, guarSize, totSize); + } + + private void setFileDef(String fsName, DefaultValues def) throws NamespaceException { + + String fileType = parserUtil.getDefaultFileType(fsName); + log.debug("VFS({}).DefaultValues.file.type = '{}'", fsName, fileType); + long lifeTime = parserUtil.getDefaultFileLifeTime(fsName); + log.debug("VFS({}).DefaultValues.file.lifeTime = '{}'", fsName, lifeTime); + def.setFileDefaults(fileType, lifeTime); + } + + // ******************* MAPPING RULE *************************** + + private void buildMapRules() throws NamespaceException { + + int numOfMapRules = parserUtil.getNumberOfMappingRule(); + String ruleName; + String stfnRoot; + String mappedFS; + MappingRule mapRule; + + for (int i = 0; i < numOfMapRules; 
i++) { + ruleName = parserUtil.getMapRuleName(i); + mappedFS = parserUtil.getMapRule_mappedFS(ruleName); + // Adding mapping rule to VFS within vfss; + if (vfss.containsKey(mappedFS)) { + log.debug("VFS '{}' pointed by RULE : '{}' exists.", mappedFS, ruleName); + stfnRoot = parserUtil.getMapRule_StFNRoot(ruleName); + VirtualFS vfs = vfss.get(mappedFS); + mapRule = new MappingRule(ruleName, stfnRoot, vfs); + ((VirtualFS) vfs).addMappingRule(mapRule); + maprules.put(ruleName, mapRule); + } else { + log.error("VFS '{}' pointed by RULE : '{}' DOES NOT EXISTS.", mappedFS, ruleName); + } + } + } + + // ******************* APPROACHABLE RULE *************************** + + private void buildAppRules() throws NamespaceException { + + int numOfAppRules = parserUtil.getNumberOfApproachRule(); + + String ruleName; + String dn; + String vo_name; + String relPath; + String anonymousHttpReadString; + List appFSList; + ApproachableRule appRule; + + log.debug("Number of APP Rule : {}", numOfAppRules); + + + for (int i = 0; i < numOfAppRules; i++) { + ruleName = parserUtil.getApproachRuleName(i); + log.debug(" APP rule nr: {} is named : {}", i, ruleName); + + dn = parserUtil.getAppRule_SubjectDN(ruleName); + vo_name = parserUtil.getAppRule_SubjectVO(ruleName); + SubjectRules subjectRules = new SubjectRules(dn, vo_name); + + relPath = parserUtil.getAppRule_RelativePath(ruleName); + + anonymousHttpReadString = parserUtil.getAppRule_AnonymousHttpRead(ruleName); + if (anonymousHttpReadString != null && !anonymousHttpReadString.trim().isEmpty()) { + appRule = new ApproachableRule(ruleName, subjectRules, relPath, + Boolean.parseBoolean(anonymousHttpReadString)); + } else { + appRule = new ApproachableRule(ruleName, subjectRules, relPath); + } + + appFSList = parserUtil.getAppRule_AppFS(ruleName); + for (String appFS : appFSList) { + if (vfss.containsKey(appFS)) { + log.debug("VFS '{}' pointed by RULE : '{}' exists.", appFS, ruleName); + VirtualFS vfs = vfss.get(appFS); + ((VirtualFS) 
vfs).addApproachableRule(appRule); + appRule.addApproachableVFS(vfs); + } else { + log.error("VFS '{}' pointed by RULE : '{}' DOES NOT EXISTS.", appFS, ruleName); + } + } + apprules.put(ruleName, appRule); + } + } + + /***************************************************************************** + * BUSINESS METHODs + ****************************************************************************/ + + public String getNamespaceVersion() { + + return version; + } + + public List getAllVFS_Roots() { + + Collection elem = vfss.values(); + List roots = new ArrayList<>(vfss.size()); + Iterator scan = elem.iterator(); + while (scan.hasNext()) { + String root = null; + root = scan.next().getRootPath(); + roots.add(root); + } + return roots; + } + + public Map getMapVFS_Root() { + + Map result = new HashMap<>(); + Collection elem = vfss.values(); + Iterator scan = elem.iterator(); + while (scan.hasNext()) { + String root = null; + VirtualFS vfs = scan.next(); + root = vfs.getRootPath(); + result.put(root, vfs); + } + return result; + } + + public List getAllMappingRule_StFNRoots() { + + Collection elem = maprules.values(); + List roots = new ArrayList<>(maprules.size()); + Iterator scan = elem.iterator(); + String root = null; + while (scan.hasNext()) { + root = scan.next().getStFNRoot(); + roots.add(root); + } + return roots; + } + + public Map getMappingRuleMAP() { + + Map map = new HashMap<>(); + Collection elem = maprules.values(); + Iterator scan = elem.iterator(); + String root = null; + String name = null; + MappingRule rule; + while (scan.hasNext()) { + rule = scan.next(); + root = rule.getStFNRoot(); + name = rule.getRuleName(); + map.put(name, root); + } + return map; + } + + public VirtualFS getVFS(String vfsName) { + + return vfss.get(vfsName); + } } diff --git a/src/main/java/it/grid/storm/namespace/config/xml/XMLParserUtil.java b/src/main/java/it/grid/storm/namespace/config/xml/XMLParserUtil.java index c30bf5c9a..a1e25ef55 100644 --- 
a/src/main/java/it/grid/storm/namespace/config/xml/XMLParserUtil.java +++ b/src/main/java/it/grid/storm/namespace/config/xml/XMLParserUtil.java @@ -17,10 +17,6 @@ package it.grid.storm.namespace.config.xml; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.model.SAAuthzType; - -import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; import java.util.List; @@ -34,6 +30,9 @@ import com.google.common.collect.Lists; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.model.SAAuthzType; + /** *

* Title: @@ -56,1157 +55,906 @@ */ public class XMLParserUtil implements XMLConst { - private final HierarchicalConfiguration configuration; - private final Logger log = LoggerFactory.getLogger(XMLParserUtil.class); - - public XMLParserUtil(Configuration config) { - - configuration = (HierarchicalConfiguration) config; - } - - /***************************************************************************** - * GENERICS METHODS - */ - - public boolean validateXML() { - - return true; - } - - public boolean areThereSustitutionCharInside(String element) { - - boolean result = false; - result = (element.indexOf(XMLConst.PROT_SUB_PATTERN) != -1) - || (element.indexOf(XMLConst.FS_SUB_PATTERN) != -1) - || (element.indexOf(XMLConst.APPRULE_SUB_PATTERN) != -1) - || (element.indexOf(XMLConst.MAP_SUB_PATTERN) != -1) - || (element.indexOf(XMLConst.ACL_ENTRY_SUB_PATTERN) != -1); - return result; - } - - public char whicSubstitutionChar(String element) { - - if (element.indexOf(XMLConst.PROT_SUB_PATTERN) != -1) { - return XMLConst.PROT_SUB_PATTERN; - } else if (element.indexOf(XMLConst.FS_SUB_PATTERN) != -1) { - return XMLConst.FS_SUB_PATTERN; - } else if (element.indexOf(XMLConst.APPRULE_SUB_PATTERN) != -1) { - return APPRULE_SUB_PATTERN; - } else if (element.indexOf(XMLConst.MAP_SUB_PATTERN) != -1) { - return XMLConst.MAP_SUB_PATTERN; - } else if (element.indexOf(XMLConst.ACL_ENTRY_SUB_PATTERN) != -1) { - return XMLConst.ACL_ENTRY_SUB_PATTERN; - } else if (element.indexOf(XMLConst.MEMBER_SUB_PATTERN) != -1) { - return XMLConst.MEMBER_SUB_PATTERN; - } - return ' '; - } - - /***************************************************************************** - * FILESYSTEMS METHODS - */ - public String getNamespaceVersion() throws NamespaceException { - - String result = null; - result = getStringProperty(XMLConst.NAMESPACE_VERSION); - return result; - } - - public String getFSSpaceTokenDescription(String nameOfFS) - throws NamespaceException { - - int numOfFS = 
retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FS_SPACE_TOKEN_DESCRIPTION)); - return result; - } - - /** - * public String getAuthorizationSource(String nameOfFS) throws - * NamespaceException { int numOfFS = retrieveNumberByName(nameOfFS, - * XMLConst.FS_BY_NAME); String result = null; //Optional element if - * (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.FS_AUTHZ))) { - * result = getStringProperty(substituteNumberInFSElement(numOfFS, - * XMLConst.FS_AUTHZ)); } else { //Default value needed. result = - * XMLConst.DEFAULT_AUTHZ_SOURCE; - * log.debug("AuthZ source for VFS(+'"+nameOfFS+ - * "') is absent. Default value ('"+result+"') will be used."); } return - * result; } - **/ - - /** - * public boolean getQuotaCheck(String nameOfFS) throws NamespaceException { - * int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); boolean - * result = false; //Optional element if - * (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_CHECK))) { - * result = getBooleanProperty(substituteNumberInFSElement(numOfFS, - * XMLConst.QUOTA_CHECK)); } else { //Default value needed. result = - * XMLConst.DEFAULT_CHECKING_QUOTA; - * log.debug("Checking quota flag in VFS(+'"+nameOfFS - * +"') is absent. 
Default value ('"+result+"') will be used."); } return - * result; } - **/ - - public String getRetentionPolicyType(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.RETENTION_POLICY)); - return result; - } - - public String getAccessLatencyType(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.ACCESS_LATENCY)); - return result; - } - - public String getExpirationModeType(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.EXPIRATION_MODE)); - return result; - } - - public String getOnlineSpaceUnitType(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = null; - // Optional element - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.ONLINE_SIZE_UNIT))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.ONLINE_SIZE_UNIT)); - } else { // Default value needed. - result = XMLConst.DEFAULT_UNIT_TYPE; - log.debug("Online Space Unit type for VFS(+'" + nameOfFS - + "') is absent. 
Default value ('" + result + "') will be used"); - } - return result; - } - - public long getOnlineSpaceSize(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - long result = getLongProperty(substituteNumberInFSElement(numOfFS, - XMLConst.ONLINE_SIZE)); - return result; - } - - public String getNearlineSpaceUnitType(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = null; - // Optional element - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.NEARLINE_SIZE_UNIT))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.NEARLINE_SIZE_UNIT)); - } else { // Default value needed. - result = XMLConst.DEFAULT_UNIT_TYPE; - log.debug("Online Space Unit type for VFS(+'" + nameOfFS - + "') is absent. Default value ('" + result + "') will be used"); - } - return result; - } - - public long getNearlineSpaceSize(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - long result = getLongProperty(substituteNumberInFSElement(numOfFS, - XMLConst.NEARLINE_SIZE)); - return result; - } - - public int getNumberOfFS() throws NamespaceException { - - return getPropertyNumber(XMLConst.FS_COUNTING); - } - - public String getFSName(int numOfFS) throws NamespaceException { - - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FILESYSTEM_NAME)); - } - - public int getFSNumber(String nameOfFS) throws NamespaceException { - - return retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - } - - public String getFSType(String nameOfFS) throws NamespaceException { - - // log.debug("-----FSTYPE------START"); - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - // log.debug("-----FSTYPE------END"); - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FILESYSTEM_TYPE)); - } - - public String 
getFSRoot(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FS_ROOT)); - // log.debug("VFS ROOT = "+result); - return result; - } - - public String getFSDriver(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FS_DRIVER)); - } - - public String getSpaceDriver(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FS_SPACE_DRIVER)); - } - - public boolean isDefaultElementPresent(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - // FS_DEFAULTVALUES - result = isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.FS_DEFAULTVALUES)); - return result; - } - - public String getDefaultSpaceType(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.DEF_SPACE_TYPE)); - } + private final HierarchicalConfiguration configuration; + private final Logger log = LoggerFactory.getLogger(XMLParserUtil.class); + + public XMLParserUtil(Configuration config) { + + configuration = (HierarchicalConfiguration) config; + } + + /***************************************************************************** + * GENERICS METHODS + */ + + public boolean validateXML() { + + return true; + } + + public boolean areThereSustitutionCharInside(String element) { + + boolean result = false; + result = (element.indexOf(XMLConst.PROT_SUB_PATTERN) != -1) + || (element.indexOf(XMLConst.FS_SUB_PATTERN) != -1) + || 
(element.indexOf(XMLConst.APPRULE_SUB_PATTERN) != -1) + || (element.indexOf(XMLConst.MAP_SUB_PATTERN) != -1) + || (element.indexOf(XMLConst.ACL_ENTRY_SUB_PATTERN) != -1); + return result; + } + + public char whicSubstitutionChar(String element) { + + if (element.indexOf(XMLConst.PROT_SUB_PATTERN) != -1) { + return XMLConst.PROT_SUB_PATTERN; + } else if (element.indexOf(XMLConst.FS_SUB_PATTERN) != -1) { + return XMLConst.FS_SUB_PATTERN; + } else if (element.indexOf(XMLConst.APPRULE_SUB_PATTERN) != -1) { + return XMLConst.APPRULE_SUB_PATTERN; + } else if (element.indexOf(XMLConst.MAP_SUB_PATTERN) != -1) { + return XMLConst.MAP_SUB_PATTERN; + } else if (element.indexOf(XMLConst.ACL_ENTRY_SUB_PATTERN) != -1) { + return XMLConst.ACL_ENTRY_SUB_PATTERN; + } else if (element.indexOf(XMLConst.MEMBER_SUB_PATTERN) != -1) { + return XMLConst.MEMBER_SUB_PATTERN; + } + return ' '; + } + + /***************************************************************************** + * FILESYSTEMS METHODS + */ + public String getNamespaceVersion() throws NamespaceException { + + String result = null; + result = getStringProperty(XMLConst.NAMESPACE_VERSION); + return result; + } + + public String getFSSpaceTokenDescription(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String result = getStringProperty( + substituteNumberInFSElement(numOfFS, XMLConst.FS_SPACE_TOKEN_DESCRIPTION)); + return result; + } + + public String getRetentionPolicyType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String result = + getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.RETENTION_POLICY)); + return result; + } + + public String getAccessLatencyType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String result = + getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.ACCESS_LATENCY)); + 
return result; + } + + public String getExpirationModeType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String result = + getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.EXPIRATION_MODE)); + return result; + } + + public String getOnlineSpaceUnitType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String result = null; + // Optional element + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.ONLINE_SIZE_UNIT))) { + result = getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.ONLINE_SIZE_UNIT)); + } else { // Default value needed. + result = XMLConst.DEFAULT_UNIT_TYPE; + log.debug("Online Space Unit type for VFS(+'" + nameOfFS + "') is absent. Default value ('" + + result + "') will be used"); + } + return result; + } + + public long getOnlineSpaceSize(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + long result = getLongProperty(substituteNumberInFSElement(numOfFS, XMLConst.ONLINE_SIZE)); + return result; + } + + public String getNearlineSpaceUnitType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String result = null; + // Optional element + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.NEARLINE_SIZE_UNIT))) { + result = getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.NEARLINE_SIZE_UNIT)); + } else { // Default value needed. + result = XMLConst.DEFAULT_UNIT_TYPE; + log.debug("Nearline Space Unit type for VFS(+'" + nameOfFS + "') is absent. 
Default value ('" + + result + "') will be used"); + } + return result; + } + + public long getNearlineSpaceSize(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + long result = getLongProperty(substituteNumberInFSElement(numOfFS, XMLConst.NEARLINE_SIZE)); + return result; + } + + public int getNumberOfFS() throws NamespaceException { + + return getPropertyNumber(XMLConst.FS_COUNTING); + } + + public String getFSName(int numOfFS) throws NamespaceException { + + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.FILESYSTEM_NAME)); + } + + public int getFSNumber(String nameOfFS) throws NamespaceException { + + return retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + } + + public String getFSType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.FILESYSTEM_TYPE)); + } + + public String getFSRoot(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.FS_ROOT)); + } + + public String getFSDriver(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.FS_DRIVER)); + } + + public String getSpaceDriver(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.FS_SPACE_DRIVER)); + } + + public boolean isDefaultElementPresent(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return isPresent(substituteNumberInFSElement(numOfFS, XMLConst.FS_DEFAULTVALUES)); + } + + public String 
getDefaultSpaceType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.DEF_SPACE_TYPE)); + } + + public long getDefaultSpaceLifeTime(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getLongProperty(substituteNumberInFSElement(numOfFS, XMLConst.DEF_SPACE_LT)); + } + + public long getDefaultSpaceGuarSize(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getLongProperty(substituteNumberInFSElement(numOfFS, XMLConst.DEF_SPACE_GUARSIZE)); + } + + public long getDefaultSpaceTotSize(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getLongProperty(substituteNumberInFSElement(numOfFS, XMLConst.DEF_SPACE_TOTSIZE)); + } + + public String getDefaultFileType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.DEF_FILE_TYPE)); + } + + public long getDefaultFileLifeTime(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getLongProperty(substituteNumberInFSElement(numOfFS, XMLConst.DEF_FILE_LT)); + } + + public String getACLMode(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.ACL_MODE)); + } + + public int getNumberOfProt(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (numOfFS == -1) { + throw new NamespaceException("FS named '" + nameOfFS + "' does not exist in config"); + } + String protCount = + 
substitutionNumber(XMLConst.PROTOCOL_COUNTING, XMLConst.FS_SUB_PATTERN, numOfFS); + return getPropertyNumber(protCount); + } + + public String getProtName(String nameOfFS, int numOfProt) throws NamespaceException { + + return getStringProperty( + substituteNumberInProtocolElement(nameOfFS, numOfProt, XMLConst.PROTOCOL_NAME)); + } + + public int getProtNumberByName(String nameOfFS, String nameOfProt) throws NamespaceException { + + int numFS = getFSNumber(nameOfFS); + String collElem = substituteNumberInFSElement(numFS, XMLConst.PROTOCOL_BY_NAME); + return retrieveNumberByName(nameOfProt, collElem); + } + + public String getProtSchema(String nameOfFS, int numOfProt) throws NamespaceException { + + return getStringProperty( + substituteNumberInProtocolElement(nameOfFS, numOfProt, XMLConst.PROT_SCHEMA)); + } + + public String getProtHost(String nameOfFS, int numOfProt) throws NamespaceException { + + return getStringProperty( + substituteNumberInProtocolElement(nameOfFS, numOfProt, XMLConst.PROT_HOST)); + } + + public String getProtPort(String nameOfFS, int numOfProt) throws NamespaceException { - public long getDefaultSpaceLifeTime(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getLongProperty(substituteNumberInFSElement(numOfFS, - XMLConst.DEF_SPACE_LT)); - } + return getStringProperty( + substituteNumberInProtocolElement(nameOfFS, numOfProt, XMLConst.PROT_PORT)); + } - public long getDefaultSpaceGuarSize(String nameOfFS) - throws NamespaceException { + /* + * MAPPING RULES METHODS + */ - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getLongProperty(substituteNumberInFSElement(numOfFS, - XMLConst.DEF_SPACE_GUARSIZE)); - } + public int getNumberOfMappingRule() throws NamespaceException { - public long getDefaultSpaceTotSize(String nameOfFS) throws NamespaceException { + return getPropertyNumber(XMLConst.MAP_RULE_COUNTING); + } - int numOfFS = 
retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getLongProperty(substituteNumberInFSElement(numOfFS, - XMLConst.DEF_SPACE_TOTSIZE)); - } + public String getMapRuleName(int numOfMapRule) throws NamespaceException { - public String getDefaultFileType(String nameOfFS) throws NamespaceException { + return getStringProperty(substituteNumberInMAPElement(numOfMapRule, XMLConst.MAP_RULE_NAME)); + } - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.DEF_FILE_TYPE)); - } + public String getMapRule_StFNRoot(String nameOfMapRule) throws NamespaceException { - public long getDefaultFileLifeTime(String nameOfFS) throws NamespaceException { + int numOfMapRule = retrieveNumberByName(nameOfMapRule, XMLConst.MAP_RULE_BY_NAME); + return getStringProperty( + substituteNumberInMAPElement(numOfMapRule, XMLConst.MAP_RULE_STFNROOT)); + } - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getLongProperty(substituteNumberInFSElement(numOfFS, - XMLConst.DEF_FILE_LT)); - } + public String getMapRule_mappedFS(String nameOfMapRule) throws NamespaceException { - public String getACLMode(String nameOfFS) throws NamespaceException { + int numOfMapRule = retrieveNumberByName(nameOfMapRule, XMLConst.MAP_RULE_BY_NAME); + return getStringProperty( + substituteNumberInMAPElement(numOfMapRule, XMLConst.MAP_RULE_MAPPED_FS)); + } - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.ACL_MODE)); - } + /* + * APPROACHING METHODS + */ - public int getNumberOfProt(String nameOfFS) throws NamespaceException { + public int getNumberOfApproachRule() throws NamespaceException { - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (numOfFS == -1) { - throw new NamespaceException("FS named '" + nameOfFS - + "' does not exist in config"); - } - String protCount = 
substitutionNumber(XMLConst.PROTOCOL_COUNTING, - XMLConst.FS_SUB_PATTERN, numOfFS); - // log.debug( configuration.getString(protCount)); - return getPropertyNumber(protCount); - } + return getPropertyNumber(XMLConst.APP_RULE_COUNTING); + } - public String getProtName(String nameOfFS, int numOfProt) - throws NamespaceException { + public String getApproachRuleName(int numOfAppRule) throws NamespaceException { - return getStringProperty(substituteNumberInProtocolElement(nameOfFS, - numOfProt, XMLConst.PROTOCOL_NAME)); - } + return getStringProperty(substituteNumberInAPPElement(numOfAppRule, XMLConst.APP_RULE_NAME)); + } - public int getProtNumberByName(String nameOfFS, String nameOfProt) - throws NamespaceException { + public String getAppRule_SubjectDN(String nameOfAppRule) throws NamespaceException { - int numFS = getFSNumber(nameOfFS); - String collElem = substituteNumberInFSElement(numFS, - XMLConst.PROTOCOL_BY_NAME); - // log.debug("COLLECTION = "+collElem); - return retrieveNumberByName(nameOfProt, collElem); - } + int numOfAppRule = retrieveNumberByName(nameOfAppRule, XMLConst.APP_RULE_BY_NAME); + return getStringProperty(substituteNumberInAPPElement(numOfAppRule, XMLConst.APP_DN)); + } - public String getProtSchema(String nameOfFS, int numOfProt) - throws NamespaceException { + public String getAppRule_SubjectVO(String nameOfAppRule) throws NamespaceException { - return getStringProperty(substituteNumberInProtocolElement(nameOfFS, - numOfProt, XMLConst.PROT_SCHEMA)); - } + int numOfAppRule = retrieveNumberByName(nameOfAppRule, XMLConst.APP_RULE_BY_NAME); + return getStringProperty(substituteNumberInAPPElement(numOfAppRule, XMLConst.APP_VO_NAME)); + } - public String getProtHost(String nameOfFS, int numOfProt) - throws NamespaceException { + public List getAppRule_AppFS(String nameOfAppRule) throws NamespaceException { - return getStringProperty(substituteNumberInProtocolElement(nameOfFS, - numOfProt, XMLConst.PROT_HOST)); - } + int numOfAppRule = 
retrieveNumberByName(nameOfAppRule, XMLConst.APP_RULE_BY_NAME); + return getListValue(substituteNumberInAPPElement(numOfAppRule, XMLConst.APPROACHABLE_FS)); + } - public String getProtPort(String nameOfFS, int numOfProt) - throws NamespaceException { + public String getAppRule_RelativePath(String nameOfAppRule) throws NamespaceException { - return getStringProperty(substituteNumberInProtocolElement(nameOfFS, - numOfProt, XMLConst.PROT_PORT)); - } + int numOfAppRule = retrieveNumberByName(nameOfAppRule, XMLConst.APP_RULE_BY_NAME); + return getStringProperty( + substituteNumberInAPPElement(numOfAppRule, XMLConst.APP_SPACE_REL_PATH)); + } - /***************************************************************************** - * MAPPING RULES METHODS - */ + public String getAppRule_AnonymousHttpRead(String nameOfAppRule) throws NamespaceException { - public int getNumberOfMappingRule() throws NamespaceException { + int numOfAppRule = retrieveNumberByName(nameOfAppRule, XMLConst.APP_RULE_BY_NAME); + return getStringProperty( + substituteNumberInAPPElement(numOfAppRule, XMLConst.APP_ANONYMOUS_HTTP_READ)); + } - return getPropertyNumber(XMLConst.MAP_RULE_COUNTING); - } + /* + * QUOTA METHODS + */ - public String getMapRuleName(int numOfMapRule) throws NamespaceException { + public boolean getQuotaDefined(String nameOfFS) throws NamespaceException { - return getStringProperty(substituteNumberInMAPElement(numOfMapRule, - XMLConst.MAP_RULE_NAME)); - } + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_ENABLED))) { + result = true; + } + return result; + } - public String getMapRule_StFNRoot(String nameOfMapRule) - throws NamespaceException { + public boolean getQuotaEnabled(String nameOfFS) throws NamespaceException { - int numOfMapRule = retrieveNumberByName(nameOfMapRule, - XMLConst.MAP_RULE_BY_NAME); - return 
getStringProperty(substituteNumberInMAPElement(numOfMapRule, - XMLConst.MAP_RULE_STFNROOT)); - } - - public String getMapRule_mappedFS(String nameOfMapRule) - throws NamespaceException { + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + result = getBooleanProperty(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_ENABLED)); + return result; + } - int numOfMapRule = retrieveNumberByName(nameOfMapRule, - XMLConst.MAP_RULE_BY_NAME); - return getStringProperty(substituteNumberInMAPElement(numOfMapRule, - XMLConst.MAP_RULE_MAPPED_FS)); - } - - /***************************************************************************** - * APPROACHING METHODS - */ - - public int getNumberOfApproachRule() throws NamespaceException { - - return getPropertyNumber(XMLConst.APP_RULE_COUNTING); - } - - public String getApproachRuleName(int numOfAppRule) throws NamespaceException { - - return getStringProperty(substituteNumberInAPPElement(numOfAppRule, - XMLConst.APP_RULE_NAME)); - } - - public String getAppRule_SubjectDN(String nameOfAppRule) - throws NamespaceException { - - int numOfAppRule = retrieveNumberByName(nameOfAppRule, - XMLConst.APP_RULE_BY_NAME); - return getStringProperty(substituteNumberInAPPElement(numOfAppRule, - XMLConst.APP_DN)); - } - - public String getAppRule_SubjectVO(String nameOfAppRule) - throws NamespaceException { - - int numOfAppRule = retrieveNumberByName(nameOfAppRule, - XMLConst.APP_RULE_BY_NAME); - return getStringProperty(substituteNumberInAPPElement(numOfAppRule, - XMLConst.APP_VO_NAME)); - } - - public List getAppRule_AppFS(String nameOfAppRule) throws NamespaceException { - - int numOfAppRule = retrieveNumberByName(nameOfAppRule, - XMLConst.APP_RULE_BY_NAME); - return getListValue(substituteNumberInAPPElement(numOfAppRule, - XMLConst.APPROACHABLE_FS)); - } - - public String getAppRule_RelativePath(String nameOfAppRule) - throws NamespaceException { - - int numOfAppRule = retrieveNumberByName(nameOfAppRule, 
- XMLConst.APP_RULE_BY_NAME); - return getStringProperty(substituteNumberInAPPElement(numOfAppRule, - XMLConst.APP_SPACE_REL_PATH)); - } - - public String getAppRule_AnonymousHttpRead(String nameOfAppRule) - throws NamespaceException { - - int numOfAppRule = retrieveNumberByName(nameOfAppRule, - XMLConst.APP_RULE_BY_NAME); - return getStringProperty(substituteNumberInAPPElement(numOfAppRule, - XMLConst.APP_ANONYMOUS_HTTP_READ)); - } - - /***************************************************************************** - * QUOTA METHODS - */ - - public boolean getQuotaDefined(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_ENABLED))) { - result = true; - } - return result; - } - - public boolean getQuotaEnabled(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - result = getBooleanProperty(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_ENABLED)); - return result; - } - - public boolean getQuotaDeviceDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_DEVICE))) { - result = true; - } - return result; - } - - public String getQuotaDevice(String nameOfFS) throws NamespaceException { - - String result = null; - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_DEVICE))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_DEVICE)); - } else { - throw new NamespaceException("Unable to find the element '" - + XMLConst.QUOTA_DEVICE + "' for the VFS:'" + nameOfFS + "'"); - } - return result; - } - - public boolean 
getQuotaFilesetDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_FILE_SET_NAME))) { - result = true; - } - return result; - } - - public String getQuotaFileset(String nameOfFS) throws NamespaceException { - - String result = null; - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_FILE_SET_NAME))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_FILE_SET_NAME)); - } else { - throw new NamespaceException("Unable to find the element '" - + XMLConst.QUOTA_FILE_SET_NAME + "' for the VFS:'" + nameOfFS + "'"); - } - return result; - } - - public boolean getQuotaGroupIDDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_GROUP_NAME))) { - result = true; - } - return result; - } - - public String getQuotaGroupID(String nameOfFS) throws NamespaceException { - - String result = null; - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_GROUP_NAME))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_GROUP_NAME)); - } else { - throw new NamespaceException("Unable to find the element '" - + XMLConst.QUOTA_GROUP_NAME + "' for the VFS:'" + nameOfFS + "'"); - } - return result; - } - - public boolean getQuotaUserIDDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_USER_NAME))) { - result = true; - } - return result; - } - - public String 
getQuotaUserID(String nameOfFS) throws NamespaceException { - - String result = null; - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_USER_NAME))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_USER_NAME)); - } else { - throw new NamespaceException("Unable to find the element '" - + XMLConst.QUOTA_USER_NAME + "' for the VFS:'" + nameOfFS + "'"); - } - return result; - } - - /***************************************************************************** - * STORAGE CLASS METHODs - */ - public String getStorageClass(String nameOfFS) throws NamespaceException { - - String result = XMLConst.DEFAULT_STORAGE_CLASS; - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.FS_STORAGE_CLASS))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FS_STORAGE_CLASS)); - } else { - log.debug("Storage Class for VFS(+'" + nameOfFS - + "') is absent. 
Default value ('" + result + "') will be used."); - } - return result; - } - - /***************************************************************************** - * PRIVATE METHOD - *****************************************************************************/ - private String substitutionNumber(String xpath, char patternChar, int number) { - - int startIndex = 0; - int pos = 0; - StringBuilder result = new StringBuilder(); - pos = xpath.indexOf(patternChar, startIndex); - String numStr = Integer.toString(number); - result.append(xpath.substring(startIndex, pos)); - result.append(numStr); - result.append(xpath.substring(pos + 1)); - return result.toString(); - } - - private String substituteNumberInFSElement(int numberOfFS, String element) - throws NamespaceException { - - int numFS = getNumberOfFS(); - if (numberOfFS > numFS) { - throw new NamespaceException("Invalid pointing of Virtual File system"); - } - String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, - numberOfFS); - return new_element; - } - - private String substituteNumberInACLEntryElement(String nameOfFS, - int numberOfACLEntry, String element) throws NamespaceException { - - int numFS = getFSNumber(nameOfFS); - if (numFS == -1) { - throw new NamespaceException("Virtual File system (" + nameOfFS - + ") does not exists"); - } - int numACL = getNumberOfACL(nameOfFS); - if (numberOfACLEntry > numACL) { - throw new NamespaceException("Invalid pointing of ACL Entry within VFS"); - } - String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, - numFS); - new_element = substitutionNumber(new_element, - XMLConst.ACL_ENTRY_SUB_PATTERN, numberOfACLEntry); - return new_element; - } - - private String substituteNumberInProtocolElement(String nameOfFS, - int numberOfProtocol, String element) throws NamespaceException { - - int numFS = getFSNumber(nameOfFS); - if (numFS == -1) { - throw new NamespaceException("Virtual File system (" + nameOfFS - + ") does not exists"); - } - int 
numProt = getNumberOfProt(nameOfFS); - if (numberOfProtocol > numProt) { - throw new NamespaceException("Invalid pointing of Protocol within VFS"); - } - String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, - numFS); - new_element = substitutionNumber(new_element, XMLConst.PROT_SUB_PATTERN, - numberOfProtocol); - return new_element; - } - - private String substituteNumberInPoolElement(String nameOfFS, - int numberOfPool, String element) throws NamespaceException { - - int numFS = getFSNumber(nameOfFS); - if (numFS == -1) { - throw new NamespaceException("Virtual File system (" + nameOfFS - + ") does not exists"); - } - int numPool = getNumberOfPool(nameOfFS); - if (numberOfPool > numPool) { - throw new NamespaceException("Invalid pointing of Pool within VFS"); - } - String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, - numFS); - new_element = substitutionNumber(new_element, XMLConst.POOL_SUB_PATTERN, - numberOfPool); - return new_element; - } - - private String substituteNumberInMembersElement(String nameOfFS, - int numOfPool, int numberOfMember, String element) - throws NamespaceException { - - int numFS = getFSNumber(nameOfFS); - if (numFS == -1) { - throw new NamespaceException("Virtual File system (" + nameOfFS - + ") does not exists"); - } - int numMembers = getNumberOfPoolMembers(nameOfFS, numOfPool); - if (numberOfMember > numMembers) { - throw new NamespaceException("Invalid pointing of Member within VFS"); - } - String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, - numFS); - new_element = substitutionNumber(new_element, XMLConst.POOL_SUB_PATTERN, - numOfPool); - new_element = substitutionNumber(new_element, XMLConst.MEMBER_SUB_PATTERN, - numberOfMember); - return new_element; - } - - private String substituteNumberInMAPElement(int numberOfMapRule, - String element) throws NamespaceException { - - int numMapRule = getNumberOfMappingRule(); - - if (numberOfMapRule > numMapRule) { - throw new 
NamespaceException("Invalid pointing of Mapping Rule"); - } - String new_element = substitutionNumber(element, XMLConst.MAP_SUB_PATTERN, - numberOfMapRule); - return new_element; - } - - private String substituteNumberInAPPElement(int numberOfAppRule, - String element) throws NamespaceException { - - int numAppRule = getNumberOfApproachRule(); - if (numberOfAppRule > numAppRule) { - throw new NamespaceException("Invalid pointing of Approachable Rule"); - } - String new_element = substitutionNumber(element, - XMLConst.APPRULE_SUB_PATTERN, numberOfAppRule); - return new_element; - } - - private int retrieveNumberByName(String name, String collectionElement, - boolean logging) { - - int result = -1; - int size = -1; - // log.debug(" NAME : "+name+" | Collection Element :"+collectionElement); - List prop = configuration.getList(collectionElement); - if (prop != null) { - size = prop.size(); - // log.debug("Size = "+size); - if (logging) { - for (int i = 0; i < size; i++) { - log.debug(prop.get(i).toString()); - } - } - result = prop.indexOf(name); - } else { - log.warn("[retrieveNumberByName_3] Element <" + collectionElement - + "> does not exists in namespace configuration file"); - } - return result; - } - - private int retrieveNumberByName(String name, String collectionElement) { - - int result = -1; - int size = -1; - // log.debug(" NAME : "+name+" | Collection Element :"+collectionElement); - List prop = configuration.getList(collectionElement); - if (prop != null) { - size = prop.size(); - result = prop.indexOf(name); - } else { - log.warn("[retrieveNumberByName_2] Element <" + collectionElement - + "> does not exists in namespace configuration file"); - } - return result; - } - - public Iterator getKeys() { - - return configuration.getKeys(); - } - - /** - * - * @param element - * String - * @return int - */ - private int getPropertyNumber(String element) { - - int result = -1; - Object prop = configuration.getProperty(element); - if (prop != null) { - result = 
1; // If it is not null its value is atleast '1'! - if (prop instanceof Collection) { - result = ((Collection) prop).size(); - } - } else { - log.warn("[getPropertyNumber] Element <" + element - + "> does not exists in namespace configuration file"); - } - - return result; - } - - private boolean isPresent(String element) { - - boolean result = false; - result = configuration.containsKey(element); - // log.debug("XMLPArserUtil: isPresent('"+element+"')="+result); - return result; - } - - /** - * - * @param element - * String - * @return int - */ - private String getStringProperty(String element) throws NamespaceException { - - String prop = null; - try { - prop = configuration.getString(element); - // log.debug("ELEMENT = "+element+" VALUE = "+prop); - } catch (ConversionException ce) { - log.warn("[getStringProperty] Element <" + element - + "> does not contains a String value"); - } catch (NoSuchElementException note) { - log.warn("[getStringProperty] Element <" + element - + "> does not exists in namespace configuration file"); - } - return prop; - } - - /** - * - * @param element - * String - * @return boolean - */ - private boolean getBooleanProperty(String element) throws NamespaceException { - - boolean result = false; - try { - result = configuration.getBoolean(element); - } catch (ConversionException ce) { - log.warn("[getLongProperty] Element <" + element - + "> does not contains a String value"); - } catch (NoSuchElementException note) { - log.warn("[getLongProperty] Element <" + element - + "> does not exists in namespace configuration file"); - } - return result; - } - - /** - * - * @param element - * String - * @return int - */ - private long getLongProperty(String element) throws NamespaceException { - - long prop = -1L; - try { - prop = configuration.getLong(element); - } catch (ConversionException ce) { - log.warn("[getLongProperty] Element <" + element - + "> does not contains a String value"); - } catch (NoSuchElementException note) { - 
log.warn("[getLongProperty] Element <" + element - + "> does not exists in namespace configuration file"); - } - return prop; - } - - /** - * - * @param element - * String - * @return int - */ - private int getIntProperty(String element) { - - int prop = -1; - try { - prop = configuration.getInt(element); - } catch (ConversionException ce) { - log.warn("[getIntProperty] Element <" + element - + "> does not contains a String value"); - } catch (NoSuchElementException note) { - log.warn("[getIntProperty] Element <" + element - + "> does not exists in namespace configuration file"); - } - return prop; - } - - /** - * - * @param element - * String - * @return int - */ - private String[] getListProperty(String element) throws NamespaceException { - - String prop = null; - try { - prop = configuration.getString(element); - } catch (ConversionException ce) { - log.warn("[getListProperty] Element <" + element - + "> does not contains a String value"); - } catch (NoSuchElementException note) { - log.warn("[getListProperty] Element <" + element - + "> does not exists in namespace configuration file"); - } - // log.debug("LIST : "+prop); - String[] result = prop.split(","); - // log.debug(" LIST lenght :"+result.length); - return result; - } - - private List getListValue(String collectionElement) { - - List propList = configuration.getList(collectionElement); - List prop = Lists.newArrayList(); - // For a set or list - for (Object element2 : propList) { - String element = (String) element2; - prop.add(element.trim()); - } - - log.debug("LIST - prop : " + prop); - log.debug("Nr. 
of elements : " + prop.size()); - if (prop.size() == 0) { - log.warn("[retrieveNumberByName_2] Element <" + collectionElement - + "> does not exists in namespace configuration file"); - } - return prop; - } - - public boolean getDefaultACLDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.GROUP_NAME))) { - result = true; - } - return result; - } - - public int getNumberOfACL(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (numOfFS == -1) { - throw new NamespaceException("FS named '" + nameOfFS - + "' does not exist in config"); - } - String aclCount = substitutionNumber(XMLConst.ACL_ENTRY_COUNTING, - XMLConst.FS_SUB_PATTERN, numOfFS); - log.debug("ACL Count = " + aclCount); - return getPropertyNumber(aclCount); - } - - public String getGroupName(String nameOfFS, int aclEntryNumber) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String aclCount = substitutionNumber(XMLConst.GROUP_NAME, - XMLConst.FS_SUB_PATTERN, numOfFS); - String result = null; - Object prop = configuration.getProperty(aclCount); - if (prop != null) { - if (prop instanceof Collection) { - ArrayList propList = new ArrayList((Collection) prop); - if (propList.size() > aclEntryNumber) { - result = propList.get(aclEntryNumber); - } - } else { - if (prop instanceof String) { - result = ((String) prop); - } - } - } else { - log.warn("[getPropertyNumber] Element <" + aclCount - + "> does not exists in namespace configuration file"); - } - return result; - // return getStringProperty(substituteNumberInACLEntryElement(nameOfFS, - // aclEntryNumber, XMLConst.GROUP_NAME)); - } - - public String getPermissionString(String nameOfFS, int aclEntryNumber) - throws NamespaceException { - - int numOfFS = 
retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String aclCount = substitutionNumber(XMLConst.PERMISSIONS, - XMLConst.FS_SUB_PATTERN, numOfFS); - String result = null; - Object prop = configuration.getProperty(aclCount); - if (prop != null) { - if (prop instanceof Collection) { - ArrayList propList = new ArrayList((Collection) prop); - if (propList.size() > aclEntryNumber) { - result = propList.get(aclEntryNumber); - } - } else { - if (prop instanceof String) { - result = ((String) prop); - } - } - } else { - log.warn("[getPropertyNumber] Element <" + aclCount - + "> does not exists in namespace configuration file"); - } - return result; - - // return getStringProperty(substituteNumberInACLEntryElement(nameOfFS, - // aclEntryNumber, XMLConst.PERMISSIONS)); - } - - /** - * ********************************** VERSION 1.4.0 - ***************************************/ - - public String getStorageAreaAuthz(String nameOfFS, SAAuthzType type) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (type.equals(SAAuthzType.FIXED)) { - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.SA_AUTHZ_FIXED)); - } else { - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.SA_AUTHZ_DB)); - } - } - - public SAAuthzType getStorageAreaAuthzType(String nameOfFS) - throws NamespaceException { - - if (getStorageAreaAuthzFixedDefined(nameOfFS)) { - return SAAuthzType.FIXED; - } - if (getStorageAreaAuthzDBDefined(nameOfFS)) { - return SAAuthzType.AUTHZDB; - } - throw new NamespaceException("Unable to find the SAAuthzType in " - + nameOfFS); - } - - public boolean getStorageAreaAuthzFixedDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.SA_AUTHZ_FIXED))) { - result = true; - } - return result; - } - - public boolean 
getStorageAreaAuthzDBDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.SA_AUTHZ_DB))) { - result = true; - } - return result; - } - - public int getProtId(String nameOfFS, int numOfProt) - throws NamespaceException { - - // int numOfProt = getProtNumberByName(nameOfFS, protName); - String protId = substituteNumberInProtocolElement(nameOfFS, numOfProt, - XMLConst.PROT_ID); - // log.debug("ProtID : "+protId); - if (isPresent(protId)) { - return getIntProperty(substituteNumberInProtocolElement(nameOfFS, - numOfProt, XMLConst.PROT_ID)); - } else { - return -1; - } - } - - public boolean getOnlineSpaceLimitedSize(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - result = getBooleanProperty(substituteNumberInFSElement(numOfFS, - XMLConst.LIMITED_SIZE)); - return result; - } - - public int getNumberOfPool(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (numOfFS == -1) { - throw new NamespaceException("FS named '" + nameOfFS - + "' does not exist in config"); - } - if (!getPoolDefined(nameOfFS)) - return 0; - String protCount = substitutionNumber(XMLConst.POOL_COUNTING, - XMLConst.FS_SUB_PATTERN, numOfFS); - return getPropertyNumber(protCount); - } - - public boolean getPoolDefined(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.POOL_COUNTING))) { - result = true; - } - return result; - } - - public String getBalancerStrategy(String nameOfFS) throws NamespaceException { - - String result = null; - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if 
(isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.BALANCE_STRATEGY))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.BALANCE_STRATEGY)); - } else { - throw new NamespaceException("Unable to find the element '" - + XMLConst.BALANCE_STRATEGY + "' for the VFS:'" + nameOfFS + "'"); - } - return result; - } - - public int getNumberOfPoolMembers(String nameOfFS, int poolCounter) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (numOfFS == -1) { - throw new NamespaceException("FS named '" + nameOfFS - + "' does not exist in config"); - } - String subTree = substituteNumberInPoolElement(nameOfFS, poolCounter, - XMLConst.POOL); - HierarchicalConfiguration sub = configuration.configurationAt(subTree); - Object members = sub.getProperty("members.member[@member-id]"); - int numOfMembers = -1; - if (members != null) { - if (members instanceof Collection) { - numOfMembers = ((Collection) members).size(); - } else { - numOfMembers = 1; - } - } else { - log.error("Error during the retrieve of the number of pool member of " - + nameOfFS); - } - return numOfMembers; - } - - public int getMemberID(String nameOfFS, int numOfPool, int memberNr) - throws NamespaceException { - - return getIntProperty(substituteNumberInMembersElement(nameOfFS, numOfPool, - memberNr, XMLConst.POOL_MEMBER_ID)); - } - - public int getMemberWeight(String nameOfFS, int numOfPool, int memberNr) - throws NamespaceException { - - return getIntProperty(substituteNumberInMembersElement(nameOfFS, numOfPool, - memberNr, XMLConst.POOL_MEMBER_WEIGHT)); - } - - public String getBalancerStrategy(String fsName, int poolCounter) - throws NamespaceException { - - String poolId = substituteNumberInPoolElement(fsName, poolCounter, - XMLConst.BALANCE_STRATEGY); - if (isPresent(poolId)) { - return getStringProperty(substituteNumberInPoolElement(fsName, - poolCounter, XMLConst.BALANCE_STRATEGY)); - } else { - throw new 
NamespaceException("Unable to find the element '" - + XMLConst.BALANCE_STRATEGY + "' for the VFS:'" + fsName + "'"); - } - } + public boolean getQuotaDeviceDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_DEVICE))) { + result = true; + } + return result; + } + + public String getQuotaDevice(String nameOfFS) throws NamespaceException { + + String result = null; + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_DEVICE))) { + result = getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_DEVICE)); + } else { + throw new NamespaceException("Unable to find the element '" + XMLConst.QUOTA_DEVICE + + "' for the VFS:'" + nameOfFS + "'"); + } + return result; + } + + public boolean getQuotaFilesetDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_FILE_SET_NAME)); + } + + public String getQuotaFileset(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (!isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_FILE_SET_NAME))) { + String errorMessage = String.format("Unable to find the element '%s' for the VFS:'%s'", + XMLConst.QUOTA_FILE_SET_NAME, nameOfFS); + throw new NamespaceException(errorMessage); + } + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_FILE_SET_NAME)); + } + + public boolean getQuotaGroupIDDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_GROUP_NAME)); + } + + public String getQuotaGroupID(String 
nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (!isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_GROUP_NAME))) { + String errorMessage = String.format("Unable to find the element '%s' for the VFS:'%s'", + XMLConst.QUOTA_GROUP_NAME, nameOfFS); + throw new NamespaceException(errorMessage); + } + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_GROUP_NAME)); + } + + public boolean getQuotaUserIDDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_USER_NAME)); + } + + public String getQuotaUserID(String nameOfFS) throws NamespaceException { + + String result = null; + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_USER_NAME))) { + result = getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_USER_NAME)); + } else { + throw new NamespaceException("Unable to find the element '" + XMLConst.QUOTA_USER_NAME + + "' for the VFS:'" + nameOfFS + "'"); + } + return result; + } + + /* + * STORAGE CLASS METHODs + */ + public String getStorageClass(String nameOfFS) throws NamespaceException { + + String result = XMLConst.DEFAULT_STORAGE_CLASS; + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.FS_STORAGE_CLASS))) { + result = getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.FS_STORAGE_CLASS)); + } else { + log.debug("Storage Class for VFS(+'" + nameOfFS + "') is absent. 
Default value ('" + result + + "') will be used."); + } + return result; + } + + /***************************************************************************** + * PRIVATE METHOD + *****************************************************************************/ + private String substitutionNumber(String xpath, char patternChar, int number) { + + int startIndex = 0; + int pos = 0; + StringBuilder result = new StringBuilder(); + pos = xpath.indexOf(patternChar, startIndex); + String numStr = Integer.toString(number); + result.append(xpath.substring(startIndex, pos)); + result.append(numStr); + result.append(xpath.substring(pos + 1)); + return result.toString(); + } + + private String substituteNumberInFSElement(int numberOfFS, String element) + throws NamespaceException { + + int numFS = getNumberOfFS(); + if (numberOfFS > numFS) { + throw new NamespaceException("Invalid pointing of Virtual File system"); + } + String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, numberOfFS); + return new_element; + } + + private String substituteNumberInProtocolElement(String nameOfFS, int numberOfProtocol, + String element) throws NamespaceException { + + int numFS = getFSNumber(nameOfFS); + if (numFS == -1) { + throw new NamespaceException("Virtual File system (" + nameOfFS + ") does not exists"); + } + int numProt = getNumberOfProt(nameOfFS); + if (numberOfProtocol > numProt) { + throw new NamespaceException("Invalid pointing of Protocol within VFS"); + } + String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, numFS); + new_element = substitutionNumber(new_element, XMLConst.PROT_SUB_PATTERN, numberOfProtocol); + return new_element; + } + + private String substituteNumberInPoolElement(String nameOfFS, int numberOfPool, String element) + throws NamespaceException { + + int numFS = getFSNumber(nameOfFS); + if (numFS == -1) { + throw new NamespaceException("Virtual File system (" + nameOfFS + ") does not exists"); + } + int numPool = 
getNumberOfPool(nameOfFS); + if (numberOfPool > numPool) { + throw new NamespaceException("Invalid pointing of Pool within VFS"); + } + String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, numFS); + new_element = substitutionNumber(new_element, XMLConst.POOL_SUB_PATTERN, numberOfPool); + return new_element; + } + + private String substituteNumberInMembersElement(String nameOfFS, int numOfPool, + int numberOfMember, String element) throws NamespaceException { + + int numFS = getFSNumber(nameOfFS); + if (numFS == -1) { + throw new NamespaceException("Virtual File system (" + nameOfFS + ") does not exists"); + } + int numMembers = getNumberOfPoolMembers(nameOfFS, numOfPool); + if (numberOfMember > numMembers) { + throw new NamespaceException("Invalid pointing of Member within VFS"); + } + String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, numFS); + new_element = substitutionNumber(new_element, XMLConst.POOL_SUB_PATTERN, numOfPool); + new_element = substitutionNumber(new_element, XMLConst.MEMBER_SUB_PATTERN, numberOfMember); + return new_element; + } + + private String substituteNumberInMAPElement(int numberOfMapRule, String element) + throws NamespaceException { + + int numMapRule = getNumberOfMappingRule(); + + if (numberOfMapRule > numMapRule) { + throw new NamespaceException("Invalid pointing of Mapping Rule"); + } + return substitutionNumber(element, XMLConst.MAP_SUB_PATTERN, numberOfMapRule); + } + + private String substituteNumberInAPPElement(int numberOfAppRule, String element) + throws NamespaceException { + + int numAppRule = getNumberOfApproachRule(); + if (numberOfAppRule > numAppRule) { + throw new NamespaceException("Invalid pointing of Approachable Rule"); + } + return substitutionNumber(element, XMLConst.APPRULE_SUB_PATTERN, numberOfAppRule); + } + + private int retrieveNumberByName(String name, String collectionElement) { + + int result = -1; + List prop = configuration.getList(collectionElement); + if (prop 
!= null) { + result = prop.indexOf(name); + } else { + log.warn( + "[retrieveNumberByName_2] Element <{}> does not exists in namespace configuration file", + collectionElement); + } + return result; + } + + public Iterator getKeys() { + + return configuration.getKeys(); + } + + /** + * + * @param element String + * @return int + */ + private int getPropertyNumber(String element) { + + int result = -1; + Object prop = configuration.getProperty(element); + if (prop != null) { + result = 1; // If it is not null its value is at least '1'! + if (prop instanceof Collection) { + result = ((Collection) prop).size(); + } + } else { + log.warn("[getPropertyNumber] Element <{}> does not exists in namespace configuration file", + element); + } + + return result; + } + + private boolean isPresent(String element) { + + return configuration.containsKey(element); + } + + /** + * + * @param element String + * @return int + */ + private String getStringProperty(String element) throws NamespaceException { + + String prop = null; + try { + prop = configuration.getString(element); + } catch (ConversionException ce) { + log.warn("[getStringProperty] Element <{}> does not contains a String value", element); + } catch (NoSuchElementException note) { + log.warn("[getStringProperty] Element <{}> does not exists in namespace configuration file", + element); + } + return prop; + } + + /** + * + * @param element String + * @return boolean + */ + private boolean getBooleanProperty(String element) throws NamespaceException { + + boolean result = false; + try { + result = configuration.getBoolean(element); + } catch (ConversionException ce) { + log.warn("[getLongProperty] Element <{}> does not contains a String value", element); + } catch (NoSuchElementException note) { + log.warn("[getLongProperty] Element <{}> does not exists in namespace configuration file", + element); + } + return result; + } + + /** + * + * @param element String + * @return int + */ + private long getLongProperty(String 
element) throws NamespaceException { + + long prop = -1L; + try { + prop = configuration.getLong(element); + } catch (ConversionException ce) { + log.warn("[getLongProperty] Element <{}> does not contains a String value", element); + } catch (NoSuchElementException note) { + log.warn("[getLongProperty] Element <{}> does not exists in namespace configuration file", + element); + } + return prop; + } + + /** + * + * @param element String + * @return int + */ + private int getIntProperty(String element) { + + int prop = -1; + try { + prop = configuration.getInt(element); + } catch (ConversionException ce) { + log.warn("[getIntProperty] Element <{}> does not contains a String value", element); + } catch (NoSuchElementException note) { + log.warn("[getIntProperty] Element <{}> does not exists in namespace configuration file", + element); + } + return prop; + } + + private List getListValue(String collectionElement) { + + List propList = configuration.getList(collectionElement); + List prop = Lists.newArrayList(); + // For a set or list + for (Object element2 : propList) { + String element = (String) element2; + prop.add(element.trim()); + } + + log.debug("LIST - prop : {}", prop); + log.debug("Nr. 
of elements : {}", prop.size()); + if (prop.size() == 0) { + log.warn( + "[retrieveNumberByName_2] Element <{}> does not exists in namespace configuration file", + collectionElement); + } + return prop; + } + + public boolean getDefaultACLDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.GROUP_NAME))) { + result = true; + } + return result; + } + + public int getNumberOfACL(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (numOfFS == -1) { + throw new NamespaceException("FS named '" + nameOfFS + "' does not exist in config"); + } + String aclCount = + substitutionNumber(XMLConst.ACL_ENTRY_COUNTING, XMLConst.FS_SUB_PATTERN, numOfFS); + log.debug("ACL Count = {}", aclCount); + return getPropertyNumber(aclCount); + } + + @SuppressWarnings("unchecked") + public String getGroupName(String nameOfFS, int aclEntryNumber) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String aclCount = substitutionNumber(XMLConst.GROUP_NAME, XMLConst.FS_SUB_PATTERN, numOfFS); + String result = null; + Object prop = configuration.getProperty(aclCount); + if (prop != null) { + if (prop instanceof List) { + List propList = Lists.newArrayList((List) prop); + if (propList.size() > aclEntryNumber) { + result = (String) propList.get(aclEntryNumber); + } + } else { + if (prop instanceof String) { + result = ((String) prop); + } + } + } else { + log.warn("[getPropertyNumber] Element <{}> does not exists in namespace configuration file", + aclCount); + } + return result; + } + + @SuppressWarnings("unchecked") + public String getPermissionString(String nameOfFS, int aclEntryNumber) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String aclCount = 
substitutionNumber(XMLConst.PERMISSIONS, XMLConst.FS_SUB_PATTERN, numOfFS); + String result = null; + Object prop = configuration.getProperty(aclCount); + if (prop != null) { + if (prop instanceof List) { + List propList = Lists.newArrayList((List) prop); + if (propList.size() > aclEntryNumber) { + result = propList.get(aclEntryNumber); + } + } else { + if (prop instanceof String) { + result = ((String) prop); + } + } + } else { + log.warn("[getPropertyNumber] Element <" + aclCount + + "> does not exists in namespace configuration file"); + } + return result; + } + + public String getStorageAreaAuthz(String nameOfFS, SAAuthzType type) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (type.equals(SAAuthzType.FIXED)) { + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.SA_AUTHZ_FIXED)); + } else { + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.SA_AUTHZ_DB)); + } + } + + public SAAuthzType getStorageAreaAuthzType(String nameOfFS) throws NamespaceException { + + if (getStorageAreaAuthzFixedDefined(nameOfFS)) { + return SAAuthzType.FIXED; + } + if (getStorageAreaAuthzDBDefined(nameOfFS)) { + return SAAuthzType.AUTHZDB; + } + throw new NamespaceException("Unable to find the SAAuthzType in " + nameOfFS); + } + + public boolean getStorageAreaAuthzFixedDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.SA_AUTHZ_FIXED))) { + result = true; + } + return result; + } + + public boolean getStorageAreaAuthzDBDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.SA_AUTHZ_DB))) { + result = true; + } + return result; + } + + public int getProtId(String nameOfFS, int 
numOfProt) throws NamespaceException { + + String protId = substituteNumberInProtocolElement(nameOfFS, numOfProt, XMLConst.PROT_ID); + if (isPresent(protId)) { + return getIntProperty( + substituteNumberInProtocolElement(nameOfFS, numOfProt, XMLConst.PROT_ID)); + } else { + return -1; + } + } + + public boolean getOnlineSpaceLimitedSize(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + result = getBooleanProperty(substituteNumberInFSElement(numOfFS, XMLConst.LIMITED_SIZE)); + return result; + } + + public int getNumberOfPool(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (numOfFS == -1) { + throw new NamespaceException("FS named '" + nameOfFS + "' does not exist in config"); + } + if (!getPoolDefined(nameOfFS)) + return 0; + String protCount = substitutionNumber(XMLConst.POOL_COUNTING, XMLConst.FS_SUB_PATTERN, numOfFS); + return getPropertyNumber(protCount); + } + + public boolean getPoolDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.POOL_COUNTING))) { + result = true; + } + return result; + } + + public String getBalancerStrategy(String nameOfFS) throws NamespaceException { + + String result = null; + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.BALANCE_STRATEGY))) { + result = getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.BALANCE_STRATEGY)); + } else { + throw new NamespaceException("Unable to find the element '" + XMLConst.BALANCE_STRATEGY + + "' for the VFS:'" + nameOfFS + "'"); + } + return result; + } + + public int getNumberOfPoolMembers(String nameOfFS, int poolCounter) throws NamespaceException { + + int numOfFS = 
retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (numOfFS == -1) { + throw new NamespaceException("FS named '" + nameOfFS + "' does not exist in config"); + } + String subTree = substituteNumberInPoolElement(nameOfFS, poolCounter, XMLConst.POOL); + HierarchicalConfiguration sub = configuration.configurationAt(subTree); + Object members = sub.getProperty("members.member[@member-id]"); + int numOfMembers = -1; + if (members != null) { + if (members instanceof Collection) { + numOfMembers = ((Collection) members).size(); + } else { + numOfMembers = 1; + } + } else { + log.error("Error during the retrieve of the number of pool member of {}", nameOfFS); + } + return numOfMembers; + } + + public int getMemberID(String nameOfFS, int numOfPool, int memberNr) throws NamespaceException { + + return getIntProperty( + substituteNumberInMembersElement(nameOfFS, numOfPool, memberNr, XMLConst.POOL_MEMBER_ID)); + } + + public int getMemberWeight(String nameOfFS, int numOfPool, int memberNr) + throws NamespaceException { + + return getIntProperty(substituteNumberInMembersElement(nameOfFS, numOfPool, memberNr, + XMLConst.POOL_MEMBER_WEIGHT)); + } + + public String getBalancerStrategy(String fsName, int poolCounter) throws NamespaceException { + + String poolId = substituteNumberInPoolElement(fsName, poolCounter, XMLConst.BALANCE_STRATEGY); + if (isPresent(poolId)) { + return getStringProperty( + substituteNumberInPoolElement(fsName, poolCounter, XMLConst.BALANCE_STRATEGY)); + } else { + throw new NamespaceException("Unable to find the element '" + XMLConst.BALANCE_STRATEGY + + "' for the VFS:'" + fsName + "'"); + } + } } diff --git a/src/main/java/it/grid/storm/namespace/config/xml/XMLReloadingStrategy.java b/src/main/java/it/grid/storm/namespace/config/xml/XMLReloadingStrategy.java deleted file mode 100644 index 9532f315b..000000000 --- a/src/main/java/it/grid/storm/namespace/config/xml/XMLReloadingStrategy.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * - * Copyright (c) 
Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.namespace.config.xml; - -import java.io.File; - -import org.apache.commons.configuration.reloading.FileChangedReloadingStrategy; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ -public class XMLReloadingStrategy extends FileChangedReloadingStrategy { - - private boolean notifing = false; - private Logger log = LoggerFactory.getLogger(XMLReloadingStrategy.class); - - private boolean verbosity; - private long reloadingTime; - - public void setVerbosity(boolean verbosity) { - - this.verbosity = verbosity; - } - - protected void notifingPerformed() { - - // log.debug(" NOTIFING set to FALSE"); - this.notifing = false; - } - - protected boolean notifingRequired() { - - // log.debug(" NOTIFING is "+notifing); - return notifing; - } - - protected void notifyNeeded() { - - // log.debug(" NOTIFING set to TRUE"); - this.notifing = true; - - } - - @Override - public boolean reloadingRequired() { - - boolean reloading = false; - - long now = System.currentTimeMillis(); - - if (now > lastChecked + refreshDelay) { - lastChecked = now; - if (hasChanged()) { - reloading = true; - } - } - if (verbosity) { - log.debug(" ...RELOADING REQUIRED? " + reloading); - } - - return reloading; - } - - @Override - public void reloadingPerformed() { - - updateLastModified(); - this.reloadingTime = System.currentTimeMillis(); - - } - - /** - * Check if the configuration has changed since the last time it was loaded. 
- * - * @return a flag whether the configuration has changed - */ - @Override - protected boolean hasChanged() { - - // log.debug("Checking if Namespace Configuration is changed.."); - File file = getConfigurationFile(); - // File file = thigetFile(); - if (file == null || !file.exists()) { - return false; - } - boolean result = file.lastModified() > lastModified; - if (result) { - notifyNeeded(); - log - .debug(" <<<<< Namespace Configuration is CHANGED ---> Notify needed.."); - } - return result; - } - - public File getConfigurationFile() { - - return this.configuration.getFile(); - } - - public long getLastReload() { - - return this.reloadingTime; - } - -} diff --git a/src/main/java/it/grid/storm/namespace/model/AccessLatency.java b/src/main/java/it/grid/storm/namespace/model/AccessLatency.java index c37b5e18f..1a4b48184 100644 --- a/src/main/java/it/grid/storm/namespace/model/AccessLatency.java +++ b/src/main/java/it/grid/storm/namespace/model/AccessLatency.java @@ -17,53 +17,7 @@ package it.grid.storm.namespace.model; -public class AccessLatency { - - /** - * - **/ - - private String accessLatency; - private String stringSchema; - - public final static AccessLatency ONLINE = new AccessLatency("ONLINE", - "online"); - public final static AccessLatency NEARLINE = new AccessLatency("NEARLINE", - "nearline"); - public final static AccessLatency OFFLINE = new AccessLatency("OFFLINE", - "offline"); - public final static AccessLatency UNKNOWN = new AccessLatency("UNKNOWN", - "Access Latency UNKNOWN!"); - - private AccessLatency(String accessLatency, String stringSchema) { - - this.accessLatency = accessLatency; - this.stringSchema = stringSchema; - } - - // Only get method for Name - public String getAccessLatencyName() { - - return accessLatency; - } - - // Only get method for Schema - public String toString() { - - return this.stringSchema; - } - - public static AccessLatency getAccessLatency(String accessLatency) { - - if 
(accessLatency.equals(AccessLatency.ONLINE.toString())) - return AccessLatency.ONLINE; - if (accessLatency.equals(AccessLatency.NEARLINE.toString())) - return AccessLatency.NEARLINE; - if (accessLatency.equals(AccessLatency.OFFLINE.toString())) - return AccessLatency.OFFLINE; - return AccessLatency.UNKNOWN; - } +public enum AccessLatency { + online, nearline, offline; } diff --git a/src/main/java/it/grid/storm/namespace/model/ApproachableRule.java b/src/main/java/it/grid/storm/namespace/model/ApproachableRule.java index 88af83a13..7dc3a425b 100644 --- a/src/main/java/it/grid/storm/namespace/model/ApproachableRule.java +++ b/src/main/java/it/grid/storm/namespace/model/ApproachableRule.java @@ -17,267 +17,227 @@ package it.grid.storm.namespace.model; +import java.util.LinkedList; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.griduser.AbstractGridUser; import it.grid.storm.griduser.DistinguishedName; import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.VirtualFSInterface; -import java.util.LinkedList; -import java.util.List; +public class ApproachableRule implements Comparable { -import org.slf4j.Logger; + private Logger log = LoggerFactory.getLogger(ApproachableRule.class); -/** - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ -public class ApproachableRule implements Comparable { + private final String ruleName; + private final SubjectRules subjectRules; + + private String relativePath = null; + private LinkedList appFS = new LinkedList(); + + private final boolean anonymousHttpReadAccess; + + public ApproachableRule(String rulename, SubjectRules subjectRules, String relativePath, + boolean anonymousHttpReadAccess) { - private Logger log = NamespaceDirector.getLogger(); + this.ruleName = rulename; + this.subjectRules = subjectRules; + this.relativePath = relativePath; + this.anonymousHttpReadAccess = anonymousHttpReadAccess; + } - private final String ruleName; - private final SubjectRules subjectRules; + public ApproachableRule(String rulename, SubjectRules subjectRules, String relativePath) { - private String relativePath = null; - private LinkedList appFS = new LinkedList(); - - private final boolean anonymousHttpReadAccess; - - public ApproachableRule(String rulename, SubjectRules subjectRules, - String relativePath, boolean anonymousHttpReadAccess) { - - this.ruleName = rulename; - this.subjectRules = subjectRules; - /** - * @todo : Check if relative Path is a path well formed. - */ - this.relativePath = relativePath; - this.anonymousHttpReadAccess = anonymousHttpReadAccess; - } - - public ApproachableRule(String rulename, SubjectRules subjectRules, - String relativePath) { - - this.ruleName = rulename; - this.subjectRules = subjectRules; - /** - * @todo : Check if relative Path is a path well formed. 
- */ - this.relativePath = relativePath; - this.anonymousHttpReadAccess = false; - } - - public boolean isAdmitAll() { - - return subjectRules.getDNMatchingRule().isMatchAll() - && subjectRules.getVONameMatchingRule().isMatchAll(); - } - - public void addApproachableVFS(VirtualFSInterface vfs) { - - this.appFS.add(vfs); - } - - public List getApproachableVFS() { - - return this.appFS; - } - - /** - * getSpaceRelativePath - * - * @return String - */ - public String getSpaceRelativePath() { - - return relativePath; - } - - /** - * - * @return String - */ - public String getRuleName() { - - return this.ruleName; - } - - public boolean getAnonymousHttpReadAccess() { - - return this.anonymousHttpReadAccess; - } - - /** - * - * @return Subject - */ - public SubjectRules getSubjectRules() { - - return this.subjectRules; - } - - /** - * MAIN METHOD - * - * @param gUser - * GridUserInterface - * @return boolean - */ - public boolean match(GridUserInterface gUser) { - - return matchDN(gUser.getDn()) && matchVoms(gUser); - } - - private boolean matchVoms(GridUserInterface gUser) { - - // ---- Check if VOMS Attributes are required ---- - if (subjectRules.getVONameMatchingRule().isMatchAll()) { - return true; - } - // VOMS Attribute required. 
- if (gUser instanceof AbstractGridUser - && ((AbstractGridUser) gUser).hasVoms()) { - log.debug("Grid User Requestor : " - + ((AbstractGridUser) gUser).toString()); - if (subjectRules.getVONameMatchingRule().match( - ((AbstractGridUser) gUser).getVO().getValue())) { - return true; - } - } - return false; - } - - private boolean matchDN(String dnString) { - - if (dnString == null) { - return subjectRules.getDNMatchingRule().isMatchAll(); - } - DistinguishedName dn = new DistinguishedName(dnString); - return subjectRules.getDNMatchingRule().match(dn); - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - String sep = System.getProperty("line.separator"); - sb.append(sep + " --- APPROACHABLE RULE NAME ---" + sep); - sb.append(" Approachable Rule Name : " + this.ruleName + sep); - sb.append(" SUBJECT - dn : " - + this.getSubjectRules().getDNMatchingRule() + sep); - if (!this.getSubjectRules().getVONameMatchingRule().isMatchAll()) { - sb.append(" -- VOMS cert IS MANDATORY!" + sep); - sb.append(" -- SUBJECT - vo_name : " - + this.getSubjectRules().getVONameMatchingRule() + sep); - } else { - sb.append(" -- VOMS cert is not mandatory" + sep); - } - sb.append(" Relative-Path for Space : " + this.getSpaceRelativePath() - + sep); - sb.append(" Approachable VFS : " + this.appFS + sep); - return sb.toString(); - } - - public int compareTo(Object o) { - - int result = 1; - if (o instanceof ApproachableRule) { - ApproachableRule other = (ApproachableRule) o; - result = (this.getRuleName()).compareTo(other.getRuleName()); - } - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = 1; - result = prime * result + ((appFS == null) ? 0 : appFS.hashCode()); - result = prime * result + ((log == null) ? 0 : log.hashCode()); - result = prime * result - + ((relativePath == null) ? 
0 : relativePath.hashCode()); - result = prime * result + ((ruleName == null) ? 0 : ruleName.hashCode()); - result = prime * result - + ((subjectRules == null) ? 0 : subjectRules.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - ApproachableRule other = (ApproachableRule) obj; - if (appFS == null) { - if (other.appFS != null) { - return false; - } - } else if (!appFS.equals(other.appFS)) { - return false; - } - if (log == null) { - if (other.log != null) { - return false; - } - } else if (!log.equals(other.log)) { - return false; - } - if (relativePath == null) { - if (other.relativePath != null) { - return false; - } - } else if (!relativePath.equals(other.relativePath)) { - return false; - } - if (ruleName == null) { - if (other.ruleName != null) { - return false; - } - } else if (!ruleName.equals(other.ruleName)) { - return false; - } - if (subjectRules == null) { - if (other.subjectRules != null) { - return false; - } - } else if (!subjectRules.equals(other.subjectRules)) { - return false; - } - return true; - } + this.ruleName = rulename; + this.subjectRules = subjectRules; + this.relativePath = relativePath; + this.anonymousHttpReadAccess = false; + } + + public boolean isAdmitAll() { + + return subjectRules.getDNMatchingRule().isMatchAll() + && subjectRules.getVONameMatchingRule().isMatchAll(); + } + + public void addApproachableVFS(VirtualFS vfs) { + + this.appFS.add(vfs); + } + + public List getApproachableVFS() { + + return this.appFS; + } + + /** + * getSpaceRelativePath + * + * @return String + */ + public String getSpaceRelativePath() { + + return relativePath; + } + + /** + * + * @return String + */ + public String getRuleName() { + + return this.ruleName; + } + + public boolean 
getAnonymousHttpReadAccess() { + + return this.anonymousHttpReadAccess; + } + + /** + * + * @return Subject + */ + public SubjectRules getSubjectRules() { + + return this.subjectRules; + } + + /** + * MAIN METHOD + * + * @param gUser GridUserInterface + * @return boolean + */ + public boolean match(GridUserInterface gUser) { + + return matchDN(gUser.getDn()) && matchVoms(gUser); + } + + private boolean matchVoms(GridUserInterface gUser) { + + // ---- Check if VOMS Attributes are required ---- + if (subjectRules.getVONameMatchingRule().isMatchAll()) { + return true; + } + // VOMS Attribute required. + if (gUser instanceof AbstractGridUser && ((AbstractGridUser) gUser).hasVoms()) { + log.debug("Grid User Requestor : " + ((AbstractGridUser) gUser).toString()); + if (subjectRules.getVONameMatchingRule() + .match(((AbstractGridUser) gUser).getVO().getValue())) { + return true; + } + } + return false; + } + + private boolean matchDN(String dnString) { + + if (dnString == null) { + return subjectRules.getDNMatchingRule().isMatchAll(); + } + DistinguishedName dn = new DistinguishedName(dnString); + return subjectRules.getDNMatchingRule().match(dn); + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + String sep = System.getProperty("line.separator"); + sb.append(sep + " --- APPROACHABLE RULE NAME ---" + sep); + sb.append(" Approachable Rule Name : " + this.ruleName + sep); + sb.append(" SUBJECT - dn : " + this.getSubjectRules().getDNMatchingRule() + sep); + if (!this.getSubjectRules().getVONameMatchingRule().isMatchAll()) { + sb.append(" -- VOMS cert IS MANDATORY!" 
+ sep); + sb.append(" -- SUBJECT - vo_name : " + this.getSubjectRules().getVONameMatchingRule() + + sep); + } else { + sb.append(" -- VOMS cert is not mandatory" + sep); + } + sb.append(" Relative-Path for Space : " + this.getSpaceRelativePath() + sep); + sb.append(" Approachable VFS : " + this.appFS + sep); + return sb.toString(); + } + + public int compareTo(ApproachableRule o) { + + return this.getRuleName().compareTo(o.getRuleName()); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = 1; + result = prime * result + ((appFS == null) ? 0 : appFS.hashCode()); + result = prime * result + ((log == null) ? 0 : log.hashCode()); + result = prime * result + ((relativePath == null) ? 0 : relativePath.hashCode()); + result = prime * result + ((ruleName == null) ? 0 : ruleName.hashCode()); + result = prime * result + ((subjectRules == null) ? 0 : subjectRules.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + ApproachableRule other = (ApproachableRule) obj; + if (appFS == null) { + if (other.appFS != null) { + return false; + } + } else if (!appFS.equals(other.appFS)) { + return false; + } + if (log == null) { + if (other.log != null) { + return false; + } + } else if (!log.equals(other.log)) { + return false; + } + if (relativePath == null) { + if (other.relativePath != null) { + return false; + } + } else if (!relativePath.equals(other.relativePath)) { + return false; + } + if (ruleName == null) { + if (other.ruleName != null) { + return false; + } + } else if (!ruleName.equals(other.ruleName)) { + return false; + } + if (subjectRules == null) { + if (other.subjectRules != null) { + return false; + } + } else if 
(!subjectRules.equals(other.subjectRules)) { + return false; + } + return true; + } } diff --git a/src/main/java/it/grid/storm/namespace/model/Authority.java b/src/main/java/it/grid/storm/namespace/model/Authority.java index 541b1d994..f535ff451 100644 --- a/src/main/java/it/grid/storm/namespace/model/Authority.java +++ b/src/main/java/it/grid/storm/namespace/model/Authority.java @@ -17,100 +17,107 @@ package it.grid.storm.namespace.model; +import com.fasterxml.jackson.annotation.JsonProperty; + public class Authority { - private String hostname; - private int port = -1; - - public final static Authority EMPTY = new Authority(""); - - /** - * Complete constructor - * - * @param serviceHostname - * String - * @param servicePort - * int - */ - public Authority(String serviceHostname, int servicePort) { - - this.hostname = serviceHostname; - this.port = servicePort; - } - - /** - * Cnstructor with default port - * - * @param serviceHostname - * String - */ - public Authority(String serviceHostname) { - - this.hostname = serviceHostname; - } - - public String getServiceHostname() { - - return this.hostname; - } - - public void setServiceHostname(String hostname) { - - this.hostname = hostname; - } - - public int getServicePort() { - - return this.port; - } - - public void setServicePort(int port) { - - this.port = port; - } - - private String getHostnameAndPort() { - - StringBuilder result = new StringBuilder(); - if (hostname != null) { - result.append(hostname); - if (port > 0) { - result.append(":"); - result.append(port); - } - } - return result.toString(); - } - - public String toString() { - - return getHostnameAndPort(); - } - - public boolean equals(Object other) { - - boolean result = false; - if (other instanceof Authority) { - Authority otherA = (Authority) other; - if (otherA.getServiceHostname().equals(this.getServiceHostname())) { // Hostname - // is - // equal - // Check if the Port is equal. 
- if (otherA.getServicePort() == this.getServicePort()) { - result = true; - } - } - } - return result; - } - - @Override - public int hashCode() { - - int result = 17; - result = 31 * result + (hostname != null ? hostname.hashCode() : 0); - result = 31 * result + port; - return result; - } + private String hostname; + private int port = -1; + + public final static Authority EMPTY = new Authority(""); + + /** + * Complete constructor + * + * @param serviceHostname String + * @param servicePort int + */ + public Authority(String serviceHostname, int servicePort) { + + this.hostname = serviceHostname; + this.port = servicePort; + } + + /** + * Constructor with default port + * + * @param serviceHostname String + */ + public Authority(String serviceHostname) { + + this.hostname = serviceHostname; + } + + @JsonProperty("hostname") + public String getServiceHostname() { + + return this.hostname; + } + + @JsonProperty("hostname") + public void setServiceHostname(String hostname) { + + this.hostname = hostname; + } + + @JsonProperty("port") + public int getServicePort() { + + return this.port; + } + + @JsonProperty("port") + public void setServicePort(int port) { + + this.port = port; + } + + private String getHostnameAndPort() { + + StringBuilder result = new StringBuilder(); + if (hostname != null) { + result.append(hostname); + if (port > 0) { + result.append(":"); + result.append(port); + } + } + return result.toString(); + } + + public String toString() { + + return getHostnameAndPort(); + } + + public boolean equals(Object other) { + + boolean result = false; + if (other instanceof Authority) { + Authority otherA = (Authority) other; + if (otherA.getServiceHostname().equals(this.getServiceHostname())) { + // Check if the Port is equal. 
+ if (otherA.getServicePort() == this.getServicePort()) { + result = true; + } + } + } + return result; + } + + public static Authority fromString(String endpoint) { + String host = endpoint.split(":")[0]; + int port = Integer.valueOf(endpoint.split(":")[1]); + return new Authority(host, port); + } + + @Override + public int hashCode() { + + int result = 17; + result = 31 * result + (hostname != null ? hostname.hashCode() : 0); + result = 31 * result + port; + return result; + } } diff --git a/src/main/java/it/grid/storm/namespace/model/Capability.java b/src/main/java/it/grid/storm/namespace/model/Capability.java index cbe61d3ae..c09bcce54 100644 --- a/src/main/java/it/grid/storm/namespace/model/Capability.java +++ b/src/main/java/it/grid/storm/namespace/model/Capability.java @@ -17,14 +17,6 @@ package it.grid.storm.namespace.model; -import it.grid.storm.balancer.BalancingStrategy; -import it.grid.storm.balancer.BalancingStrategyFactory; -import it.grid.storm.balancer.Node; -import it.grid.storm.balancer.ftp.FTPNode; -import it.grid.storm.namespace.CapabilityInterface; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; - import java.util.ArrayList; import java.util.Hashtable; import java.util.LinkedList; @@ -32,6 +24,14 @@ import java.util.Map; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.balancer.BalancingStrategy; +import it.grid.storm.balancer.BalancingStrategyFactory; +import it.grid.storm.balancer.Node; +import it.grid.storm.balancer.ftp.FTPNode; +import it.grid.storm.namespace.CapabilityInterface; +import it.grid.storm.namespace.NamespaceException; /** *

@@ -56,359 +56,344 @@ public class Capability implements CapabilityInterface { - private Logger log = NamespaceDirector.getLogger(); - private ACLMode aclMode = ACLMode.UNDEF; - private Quota quota = null; - // List of TransportProtocol by Protocol. - private Map transpProtocolsByScheme = new Hashtable(); - // List of TransportProtocol by Protocol. - private Map transpProtocolsByID = new Hashtable(); - // List of TransportProtocol by Entering order. - private List transpProtocolsList = new ArrayList(); - // List of ProtocolPool. - private Map protocolPoolsByScheme = new Hashtable(); - // List of Balancer. - private Map> balancerByScheme = new Hashtable>(); - - private DefaultACL defaultACL = new DefaultACL(); - - /** - * Constructor - * - */ - public Capability(String aclMode) throws NamespaceException { - - setACLMode(aclMode); - } - - public Capability() throws NamespaceException { - - } - - /***************************************************************************** - * BUILDING METHODs - ****************************************************************************/ - - public void setACLMode(String aclMode) throws NamespaceException { - - this.aclMode = ACLMode.makeFromString(aclMode); - } - - /** - * addProtocol - * - * @param prot - * Protocol - */ - public void addTransportProtocolByScheme(Protocol protocol, - TransportProtocol trasfProt) { - - transpProtocolsByScheme.put(protocol, trasfProt); - } - - public void addTransportProtocolByID(int protocolIndex, - TransportProtocol trasfProt) { - - transpProtocolsByID.put(Integer.valueOf(protocolIndex), trasfProt); - } - - public void addTransportProtocol(TransportProtocol trasfProt) { - - transpProtocolsList.add(trasfProt); - } - - public void addACLEntry(ACLEntry aclEntry) { - - if (defaultACL == null) { - defaultACL = new DefaultACL(); - } - defaultACL.addACLEntry(aclEntry); - } - - public void setQuota(Quota quota) { - - this.quota = quota; - } - - public void addProtocolPoolBySchema(Protocol protocol, 
ProtocolPool protPool) - throws NamespaceException { - - protocolPoolsByScheme.put(protocol, protPool); - - // Building Balancer and put it into Map of Balancers - if (protocol.equals(Protocol.GSIFTP)) { - BalancingStrategy balancingStrategy = null; - LinkedList nodeList = new LinkedList(); - Node node = null; - boolean weighedPool = protPool.getBalanceStrategy().requireWeight(); - for (PoolMember member : protPool.getPoolMembers()) { - String hostname = member.getMemberProtocol().getAuthority() - .getServiceHostname(); - int port = member.getMemberProtocol().getAuthority().getServicePort(); - if (weighedPool) { - try { - node = buildNode(protocol, hostname, port, member.getMemberWeight()); - } catch (Exception e) { - log.error("Unable to build a node for protocol " + protocol); - throw new NamespaceException("Unable to build pool for protocol " - + protocol); - } - } else { - try { - node = buildNode(protocol, hostname, port); - } catch (Exception e) { - log.error("Unable to build a node for protocol " + protocol); - throw new NamespaceException("Unable to build pool for protocol " - + protocol); - } - } - nodeList.add(node); - } - try { - balancingStrategy = BalancingStrategyFactory.getBalancingStrategy( - protPool.getBalanceStrategy(), nodeList); - } catch (IllegalArgumentException e) { - log.error("Unable to get " + protPool.getBalanceStrategy().toString() - + " balacing strategy for nodes " + nodeList.toString()); - throw new NamespaceException( - "Unable to create a balancing schema from the protocol pool"); - } - balancerByScheme.put(protocol, balancingStrategy); - } else { - log.error("The current version manage only GSIFTP."); - } - } - - private Node buildNode(Protocol protocol, String hostname, int port) - throws Exception { - - if (Protocol.GSIFTP == protocol) - return new FTPNode(hostname, port); - throw new Exception("Unsupported protocol, no node type available: " - + protocol); - } - - private Node buildNode(Protocol protocol, String hostname, int 
port, - int memberWeight) throws Exception { - - if (Protocol.GSIFTP == protocol) - return new FTPNode(hostname, port, memberWeight); - throw new Exception("Unsupported protocol, no node type available: " - + protocol); - } - - /***************************************************************************** - * READ METHODs - ****************************************************************************/ - - /** - * getACLMode - * - * @return String - */ - public Capability.ACLMode getACLMode() { - - return aclMode; - } - - public Quota getQuota() { - - return this.quota; - } - - public DefaultACL getDefaultACL() { - - return this.defaultACL; - } - - /***************************************************************************** - * BUSINESS METHODs - ****************************************************************************/ - - public boolean isAllowedProtocol(String protocolScheme) { - - boolean result = false; - /** - * @todo IMPLEMENT THIS! - */ - return result; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - String sep = System.getProperty("line.separator"); - sb.append(sep + " Cap.aclMode : '" + this.aclMode + "'" + sep); - sb.append(" Cap.Protocol : " + sep); - - /** - * for (Map.Entry entry : m.entrySet()) { String key = entry.getKey(); - * Vector value = entry.getValue(); } - **/ - - // Print TransportProtocol - int count = 0; - for (Map.Entry transP : transpProtocolsByScheme - .entrySet()) { - count++; - sb.append("[TP(" + count + ")] " - + (transP.getKey() + ": " + transP.getValue())); - } - // Print ProtocolPool - count = 0; - for (Map.Entry protPool : protocolPoolsByScheme - .entrySet()) { - count++; - sb.append("[TP(" + count + ")] " - + (protPool.getKey() + ": " + protPool.getValue())); - } - return sb.toString(); - } - - /****************************************** - * VERSION 1.4 * - *******************************************/ - - public ProtocolPool getPoolByScheme(Protocol protocol) { - - ProtocolPool 
poll = null; - boolean isPresent = protocolPoolsByScheme.containsKey(protocol); - if (isPresent) { - poll = protocolPoolsByScheme.get(protocol); - } - return poll; - } - - public BalancingStrategy getBalancingStrategyByScheme( - Protocol protocol) { - - BalancingStrategy balancer = null; - boolean isPresent = balancerByScheme.containsKey(protocol); - if (isPresent) { - balancer = balancerByScheme.get(protocol); - } - return balancer; - } - - public List getManagedProtocolByScheme(Protocol protocol) { - - List result = new ArrayList(); - for (TransportProtocol tp : transpProtocolsList) { - if (tp.getProtocol().equals(protocol)) { - result.add(tp); - } - } - return result; - } - - public List getAllManagedProtocols() { - - List result = new ArrayList(); - result.addAll(transpProtocolsByScheme.keySet()); - return result; - } - - public boolean isPooledProtocol(Protocol protocol) { - - boolean result = false; - result = protocolPoolsByScheme.containsKey(protocol); - return result; - } - - public TransportProtocol getProtocolByID(int id) { - - TransportProtocol tProt = null; - boolean isPresent = transpProtocolsByID.containsKey(id); // Use of generics - // AUTO-BOXING - if (isPresent) { - tProt = transpProtocolsByID.get(id); - } - return tProt; - } - - /** - * - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ - public static class ACLMode { - - public static final ACLMode JUST_IN_TIME = new ACLMode("JiT"); - public static final ACLMode AHEAD_OF_TIME = new ACLMode("AoT"); - public static final ACLMode UNDEF = new ACLMode("UNDEF"); - - private String aclMode; - - private ACLMode(String mode) { - - this.aclMode = mode; - } - - private static ACLMode makeFromString(String aclMode) - throws NamespaceException { - - ACLMode result = ACLMode.UNDEF; - if (aclMode.toLowerCase().equals( - ACLMode.AHEAD_OF_TIME.toString().toLowerCase())) { - result = ACLMode.AHEAD_OF_TIME; - } else if (aclMode.toLowerCase().equals( - ACLMode.JUST_IN_TIME.toString().toLowerCase())) { - result = ACLMode.JUST_IN_TIME; - } else { - throw new NamespaceException("ACL Mode is not recognized!"); - } - return result; - } - - @Override - public String toString() { - - return aclMode; - } - - @Override - public boolean equals(Object obj) { - - if (obj == null) { - return false; - } - if (obj instanceof ACLMode) { - ACLMode aclMode = (ACLMode) obj; - if (aclMode.toString().toLowerCase() - .equals(this.toString().toLowerCase())) { - return true; - } - } else { - return false; - } - return false; - } - - @Override - public int hashCode() { - - int result = 17; - result = 31 * result + (aclMode != null ? aclMode.hashCode() : 0); - return result; - } - - } + private Logger log = LoggerFactory.getLogger(Capability.class); + private ACLMode aclMode = ACLMode.UNDEF; + private Quota quota = null; + // List of TransportProtocol by Protocol. + private Map transpProtocolsByScheme = + new Hashtable(); + // List of TransportProtocol by Protocol. + private Map transpProtocolsByID = + new Hashtable(); + // List of TransportProtocol by Entering order. + private List transpProtocolsList = new ArrayList(); + // List of ProtocolPool. + private Map protocolPoolsByScheme = + new Hashtable(); + // List of Balancer. 
+ private Map> balancerByScheme = + new Hashtable>(); + + private DefaultACL defaultACL = new DefaultACL(); + + /** + * Constructor + * + */ + public Capability(String aclMode) throws NamespaceException { + + setACLMode(aclMode); + } + + public Capability() throws NamespaceException { + + } + + /***************************************************************************** + * BUILDING METHODs + ****************************************************************************/ + + public void setACLMode(String aclMode) throws NamespaceException { + + this.aclMode = ACLMode.makeFromString(aclMode); + } + + /** + * addProtocol + * + * @param prot Protocol + */ + public void addTransportProtocolByScheme(Protocol protocol, TransportProtocol trasfProt) { + + transpProtocolsByScheme.put(protocol, trasfProt); + } + + public void addTransportProtocolByID(int protocolIndex, TransportProtocol trasfProt) { + + transpProtocolsByID.put(Integer.valueOf(protocolIndex), trasfProt); + } + + public void addTransportProtocol(TransportProtocol trasfProt) { + + transpProtocolsList.add(trasfProt); + } + + public void addACLEntry(ACLEntry aclEntry) { + + if (defaultACL == null) { + defaultACL = new DefaultACL(); + } + defaultACL.addACLEntry(aclEntry); + } + + public void setQuota(Quota quota) { + + this.quota = quota; + } + + public void addProtocolPoolBySchema(Protocol protocol, ProtocolPool protPool) + throws NamespaceException { + + protocolPoolsByScheme.put(protocol, protPool); + + // Building Balancer and put it into Map of Balancers + if (protocol.equals(Protocol.GSIFTP)) { + BalancingStrategy balancingStrategy = null; + LinkedList nodeList = new LinkedList(); + Node node = null; + boolean weighedPool = protPool.getBalanceStrategy().requireWeight(); + for (PoolMember member : protPool.getPoolMembers()) { + String hostname = member.getMemberProtocol().getAuthority().getServiceHostname(); + int port = member.getMemberProtocol().getAuthority().getServicePort(); + if (weighedPool) { + try { 
+ node = buildNode(protocol, hostname, port, member.getMemberWeight()); + } catch (Exception e) { + log.error("Unable to build a node for protocol " + protocol); + throw new NamespaceException("Unable to build pool for protocol " + protocol); + } + } else { + try { + node = buildNode(protocol, hostname, port); + } catch (Exception e) { + log.error("Unable to build a node for protocol " + protocol); + throw new NamespaceException("Unable to build pool for protocol " + protocol); + } + } + nodeList.add(node); + } + try { + balancingStrategy = + BalancingStrategyFactory.getBalancingStrategy(protPool.getBalanceStrategy(), nodeList); + } catch (IllegalArgumentException e) { + log.error("Unable to get " + protPool.getBalanceStrategy().toString() + + " balacing strategy for nodes " + nodeList.toString()); + throw new NamespaceException("Unable to create a balancing schema from the protocol pool"); + } + balancerByScheme.put(protocol, balancingStrategy); + } else { + log.error("The current version manage only GSIFTP."); + } + } + + private Node buildNode(Protocol protocol, String hostname, int port) throws Exception { + + if (Protocol.GSIFTP == protocol) + return new FTPNode(hostname, port); + throw new Exception("Unsupported protocol, no node type available: " + protocol); + } + + private Node buildNode(Protocol protocol, String hostname, int port, int memberWeight) + throws Exception { + + if (Protocol.GSIFTP == protocol) + return new FTPNode(hostname, port, memberWeight); + throw new Exception("Unsupported protocol, no node type available: " + protocol); + } + + /***************************************************************************** + * READ METHODs + ****************************************************************************/ + + /** + * getACLMode + * + * @return String + */ + public Capability.ACLMode getACLMode() { + + return aclMode; + } + + public Quota getQuota() { + + return this.quota; + } + + public DefaultACL getDefaultACL() { + + return 
this.defaultACL; + } + + /***************************************************************************** + * BUSINESS METHODs + ****************************************************************************/ + + public boolean isAllowedProtocol(String protocolScheme) { + + boolean result = false; + /** + * @todo IMPLEMENT THIS! + */ + return result; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + String sep = System.getProperty("line.separator"); + sb.append(sep + " Cap.aclMode : '" + this.aclMode + "'" + sep); + sb.append(" Cap.Protocol : " + sep); + + /** + * for (Map.Entry entry : m.entrySet()) { String key = entry.getKey(); Vector value = + * entry.getValue(); } + **/ + + // Print TransportProtocol + int count = 0; + for (Map.Entry transP : transpProtocolsByScheme.entrySet()) { + count++; + sb.append("[TP(" + count + ")] " + (transP.getKey() + ": " + transP.getValue())); + } + // Print ProtocolPool + count = 0; + for (Map.Entry protPool : protocolPoolsByScheme.entrySet()) { + count++; + sb.append("[TP(" + count + ")] " + (protPool.getKey() + ": " + protPool.getValue())); + } + return sb.toString(); + } + + /****************************************** + * VERSION 1.4 * + *******************************************/ + + public ProtocolPool getPoolByScheme(Protocol protocol) { + + ProtocolPool poll = null; + boolean isPresent = protocolPoolsByScheme.containsKey(protocol); + if (isPresent) { + poll = protocolPoolsByScheme.get(protocol); + } + return poll; + } + + public BalancingStrategy getBalancingStrategyByScheme(Protocol protocol) { + + BalancingStrategy balancer = null; + boolean isPresent = balancerByScheme.containsKey(protocol); + if (isPresent) { + balancer = balancerByScheme.get(protocol); + } + return balancer; + } + + public List getManagedProtocolByScheme(Protocol protocol) { + + List result = new ArrayList(); + for (TransportProtocol tp : transpProtocolsList) { + if (tp.getProtocol().equals(protocol)) { + 
result.add(tp); + } + } + return result; + } + + public List getAllManagedProtocols() { + + List result = new ArrayList(); + result.addAll(transpProtocolsByScheme.keySet()); + return result; + } + + public boolean isPooledProtocol(Protocol protocol) { + + boolean result = false; + result = protocolPoolsByScheme.containsKey(protocol); + return result; + } + + public TransportProtocol getProtocolByID(int id) { + + TransportProtocol tProt = null; + boolean isPresent = transpProtocolsByID.containsKey(id); // Use of generics + // AUTO-BOXING + if (isPresent) { + tProt = transpProtocolsByID.get(id); + } + return tProt; + } + + /** + * + *

+ * Title: + *

+ * + *

+ * Description: + *

+ * + *

+ * Copyright: Copyright (c) 2006 + *

+ * + *

+ * Company: INFN-CNAF and ICTP/eGrid project + *

+ * + * @author Riccardo Zappi + * @version 1.0 + */ + public static class ACLMode { + + public static final ACLMode JUST_IN_TIME = new ACLMode("JiT"); + public static final ACLMode AHEAD_OF_TIME = new ACLMode("AoT"); + public static final ACLMode UNDEF = new ACLMode("UNDEF"); + + private String aclMode; + + private ACLMode(String mode) { + + this.aclMode = mode; + } + + private static ACLMode makeFromString(String aclMode) throws NamespaceException { + + ACLMode result = ACLMode.UNDEF; + if (aclMode.toLowerCase().equals(ACLMode.AHEAD_OF_TIME.toString().toLowerCase())) { + result = ACLMode.AHEAD_OF_TIME; + } else if (aclMode.toLowerCase().equals(ACLMode.JUST_IN_TIME.toString().toLowerCase())) { + result = ACLMode.JUST_IN_TIME; + } else { + throw new NamespaceException("ACL Mode is not recognized!"); + } + return result; + } + + @Override + public String toString() { + + return aclMode; + } + + @Override + public boolean equals(Object obj) { + + if (obj == null) { + return false; + } + if (obj instanceof ACLMode) { + ACLMode aclMode = (ACLMode) obj; + if (aclMode.toString().toLowerCase().equals(this.toString().toLowerCase())) { + return true; + } + } else { + return false; + } + return false; + } + + @Override + public int hashCode() { + + int result = 17; + result = 31 * result + (aclMode != null ? 
aclMode.hashCode() : 0); + return result; + } + + } } diff --git a/src/main/java/it/grid/storm/namespace/model/DefaultValues.java b/src/main/java/it/grid/storm/namespace/model/DefaultValues.java index 77833f51b..7d45f2f07 100644 --- a/src/main/java/it/grid/storm/namespace/model/DefaultValues.java +++ b/src/main/java/it/grid/storm/namespace/model/DefaultValues.java @@ -17,10 +17,12 @@ package it.grid.storm.namespace.model; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.common.types.SizeUnit; import it.grid.storm.common.types.TimeUnit; import it.grid.storm.namespace.DefaultValuesInterface; -import it.grid.storm.namespace.NamespaceDirector; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.TFileStorageType; @@ -28,8 +30,6 @@ import it.grid.storm.srm.types.TSizeInBytes; import it.grid.storm.srm.types.TSpaceType; -import org.slf4j.Logger; - /** *

* Title: @@ -52,7 +52,7 @@ */ public class DefaultValues implements DefaultValuesInterface { - private Logger log = NamespaceDirector.getLogger(); + private Logger log = LoggerFactory.getLogger(DefaultValues.class); private SpaceDefault spaceDefault; private FileDefault fileDefault; diff --git a/src/main/java/it/grid/storm/namespace/model/MappingRule.java b/src/main/java/it/grid/storm/namespace/model/MappingRule.java index af773b53f..5c5e2cb4a 100644 --- a/src/main/java/it/grid/storm/namespace/model/MappingRule.java +++ b/src/main/java/it/grid/storm/namespace/model/MappingRule.java @@ -17,76 +17,50 @@ package it.grid.storm.namespace.model; -import it.grid.storm.namespace.VirtualFSInterface; - -/** - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ public class MappingRule { - private final String ruleName; - private final String stfnRoot; - private final VirtualFSInterface mappedFS; + private final String ruleName; + private final String stfnRoot; + private final VirtualFS mappedFS; - /** - * Constructor - * - * @param ruleName - * String - * @param stfn_root - * String - * @param vfs - * @param mapped_fs - * String - */ - public MappingRule(String ruleName, String stfn_root/* , String mapped_fs */, - VirtualFSInterface vfs) { + /** + * Constructor + * + * @param ruleName String + * @param stfn_root String + * @param vfs + * @param mapped_fs String + */ + public MappingRule(String ruleName, String stfn_root, VirtualFS vfs) { - this.ruleName = ruleName; - this.stfnRoot = stfn_root; - this.mappedFS = vfs; - } + this.ruleName = ruleName; + this.stfnRoot = stfn_root; + this.mappedFS = vfs; + } - public String getRuleName() { + public String getRuleName() { - return this.ruleName; - } + return this.ruleName; + } - public String getStFNRoot() { + public String getStFNRoot() { - return this.stfnRoot; - } + return this.stfnRoot; + } - public VirtualFSInterface getMappedFS() { + public VirtualFS getMappedFS() { - return this.mappedFS; - } + return this.mappedFS; + } - public String toString() { + public String toString() { - StringBuilder sb = new StringBuilder(); - String sep = System.getProperty("line.separator"); - sb.append(sep + " Mapping rule name : " + this.ruleName + sep); - sb.append(" StFN-Root : " + this.stfnRoot + sep); - sb.append(" mapped-FS : " + this.mappedFS + sep); - return sb.toString(); - } + StringBuilder sb = new StringBuilder(); + String sep = System.getProperty("line.separator"); + sb.append(sep + " Mapping rule name : " + this.ruleName + sep); + sb.append(" StFN-Root : " + this.stfnRoot + sep); + sb.append(" mapped-FS : " + this.mappedFS + sep); + return sb.toString(); + } } diff --git 
a/src/main/java/it/grid/storm/namespace/model/PermissionException.java b/src/main/java/it/grid/storm/namespace/model/PermissionException.java index a02e9ad72..04cd710a7 100644 --- a/src/main/java/it/grid/storm/namespace/model/PermissionException.java +++ b/src/main/java/it/grid/storm/namespace/model/PermissionException.java @@ -19,23 +19,28 @@ public class PermissionException extends RuntimeException { - public PermissionException() { + /** + * + */ + private static final long serialVersionUID = 1L; - super(); - } + public PermissionException() { - public PermissionException(String message) { + super(); + } - super(message); - } + public PermissionException(String message) { - public PermissionException(String message, Throwable cause) { + super(message); + } - super(message, cause); - } + public PermissionException(String message, Throwable cause) { - public PermissionException(Throwable cause) { + super(message, cause); + } - super(cause); - } + public PermissionException(Throwable cause) { + + super(cause); + } } diff --git a/src/main/java/it/grid/storm/namespace/model/Property.java b/src/main/java/it/grid/storm/namespace/model/Property.java index 2a0dce335..8c16cddab 100644 --- a/src/main/java/it/grid/storm/namespace/model/Property.java +++ b/src/main/java/it/grid/storm/namespace/model/Property.java @@ -17,23 +17,23 @@ package it.grid.storm.namespace.model; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.common.types.SizeUnit; -import it.grid.storm.namespace.NamespaceDirector; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.PropertyInterface; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.TSizeInBytes; -import org.slf4j.Logger; - public class Property implements PropertyInterface { - private Logger log = NamespaceDirector.getLogger(); + private Logger log = LoggerFactory.getLogger(Property.class); private TSizeInBytes totalOnlineSize = 
TSizeInBytes.makeEmpty(); private TSizeInBytes totalNearlineSize = TSizeInBytes.makeEmpty(); - private RetentionPolicy retentionPolicy = RetentionPolicy.UNKNOWN; + private RetentionPolicy retentionPolicy; private ExpirationMode expirationMode = ExpirationMode.UNKNOWN; - private AccessLatency accessLatency = AccessLatency.UNKNOWN; + private AccessLatency accessLatency; private boolean hasLimitedSize = false; public static Property from(PropertyInterface other) { @@ -103,15 +103,14 @@ public void setTotalNearlineSize(String unitType, long nearlineSize) } } - public void setRetentionPolicy(String retentionPolicy) - throws NamespaceException { + public void setRetentionPolicy(RetentionPolicy retentionPolicy) { - this.retentionPolicy = RetentionPolicy.getRetentionPolicy(retentionPolicy); + this.retentionPolicy = retentionPolicy; } - public void setAccessLatency(String accessLatency) throws NamespaceException { + public void setAccessLatency(AccessLatency accessLatency) { - this.accessLatency = AccessLatency.getAccessLatency(accessLatency); + this.accessLatency = accessLatency; } public void setExpirationMode(String expirationMode) @@ -157,7 +156,7 @@ public boolean isOnlineSpaceLimited() { */ public static class SizeUnitType { - private Logger log = NamespaceDirector.getLogger(); + private Logger log = LoggerFactory.getLogger(SizeUnitType.class); /** * - **/ - private String retentionPolicy; - private String stringSchema; - - public final static RetentionPolicy CUSTODIAL = new RetentionPolicy( - "CUSTODIAL", "custodial"); - public final static RetentionPolicy OUTPUT = new RetentionPolicy("OUTPUT", - "output"); - public final static RetentionPolicy REPLICA = new RetentionPolicy("REPLICA", - "replica"); - public final static RetentionPolicy UNKNOWN = new RetentionPolicy("UNKNOWN", - "Retention policy UNKNOWN!"); - - private RetentionPolicy(String retentionPolicy, String stringSchema) { - - this.retentionPolicy = retentionPolicy; - this.stringSchema = stringSchema; - } - - 
// Only get method for Name - public String getRetentionPolicyName() { - - return retentionPolicy; - } - - // Only get method for Schema - public String toString() { - - return this.stringSchema; - } - - public static RetentionPolicy getRetentionPolicy(String retentionPolicy) { - - if (retentionPolicy.equals(RetentionPolicy.CUSTODIAL.toString())) - return RetentionPolicy.CUSTODIAL; - if (retentionPolicy.equals(RetentionPolicy.OUTPUT.toString())) - return RetentionPolicy.OUTPUT; - if (retentionPolicy.equals(RetentionPolicy.REPLICA.toString())) - return RetentionPolicy.REPLICA; - return RetentionPolicy.UNKNOWN; - } +public enum RetentionPolicy { + custodial, output, replica; } diff --git a/src/main/java/it/grid/storm/namespace/model/SAInfo.java b/src/main/java/it/grid/storm/namespace/model/SAInfo.java deleted file mode 100644 index 8ae383863..000000000 --- a/src/main/java/it/grid/storm/namespace/model/SAInfo.java +++ /dev/null @@ -1,208 +0,0 @@ -package it.grid.storm.namespace.model; - -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.VirtualFSInterface; -import it.grid.storm.namespace.remote.Constants.HttpPerms; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -public class SAInfo { - - private String name; - private String token; - private String voname; - private String root; - private String storageclass; - private List stfnRoot; - private String retentionPolicy; - private String accessLatency; - private List protocols; - private HttpPerms anonymous; - private long availableNearlineSpace; - private List approachableRules; - - // Must have no-argument constructor - public SAInfo() { - - } - - public String getName() { - - return name; - } - - public void setName(String name) { - - this.name = name; - } - - public String getToken() { - - return token; - } - - public void setToken(String token) { - - this.token = token; - } - - public String getVoname() { - - return voname; - } - - public void 
setVoname(String voname) { - - this.voname = voname; - } - - public String getRoot() { - - return root; - } - - public void setRoot(String root) { - - this.root = root; - } - - public String getStorageclass() { - - return storageclass; - } - - public void setStorageclass(String storageclass) { - - this.storageclass = storageclass; - } - - public List getStfnRoot() { - - return stfnRoot; - } - - public void setStfnRoot(List stfnRoot) { - - this.stfnRoot = stfnRoot; - } - - public String getRetentionPolicy() { - - return retentionPolicy; - } - - public void setRetentionPolicy(String retentionPolicy) { - - this.retentionPolicy = retentionPolicy; - } - - public String getAccessLatency() { - - return accessLatency; - } - - public void setAccessLatency(String accessLatency) { - - this.accessLatency = accessLatency; - } - - public List getProtocols() { - - return protocols; - } - - public void setProtocols(List protocols) { - - this.protocols = protocols; - } - - public HttpPerms getAnonymous() { - - return anonymous; - } - - public void setAnonymous(HttpPerms anonymous) { - - this.anonymous = anonymous; - } - - public long getAvailableNearlineSpace() { - - return availableNearlineSpace; - } - - public void setAvailableNearlineSpace(long availableNearlineSpace) { - - this.availableNearlineSpace = availableNearlineSpace; - } - - public List getApproachableRules() { - - return approachableRules; - } - - public void setApproachableRules(List approachableRules) { - - this.approachableRules = approachableRules; - } - - public static SAInfo buildFromVFS(VirtualFSInterface vfs) - throws NamespaceException { - - SAInfo sa = new SAInfo(); - - sa.setName(vfs.getAliasName()); - sa.setToken(vfs.getSpaceTokenDescription()); - sa.setVoname(vfs.getApproachableRules().get(0).getSubjectRules() - .getVONameMatchingRule().getVOName()); - sa.setRoot(vfs.getRootPath()); - sa.setStfnRoot(new ArrayList()); - for (MappingRule rule : vfs.getMappingRules()) { - 
sa.getStfnRoot().add(rule.getStFNRoot()); - } - sa.setProtocols(new ArrayList()); - Iterator protocolsIterator = vfs.getCapabilities() - .getAllManagedProtocols().iterator(); - while (protocolsIterator.hasNext()) { - sa.getProtocols().add(protocolsIterator.next().getSchema()); - } - if (vfs.isHttpWorldReadable()) { - if (vfs.isApproachableByAnonymous()) { - sa.setAnonymous(HttpPerms.READWRITE); - } else { - sa.setAnonymous(HttpPerms.READ); - } - } else { - sa.setAnonymous(HttpPerms.NOREAD); - } - sa.setStorageclass(vfs.getStorageClassType().getStorageClassTypeString()); - sa.setRetentionPolicy(vfs.getProperties().getRetentionPolicy() - .getRetentionPolicyName()); - sa.setAccessLatency(vfs.getProperties().getAccessLatency() - .getAccessLatencyName()); - sa.setAvailableNearlineSpace(vfs.getAvailableNearlineSpace().value()); - sa.setApproachableRules(new ArrayList()); - for (ApproachableRule rule : vfs.getApproachableRules()) { - if (rule.getSubjectRules().getDNMatchingRule().isMatchAll() - && rule.getSubjectRules().getVONameMatchingRule().isMatchAll()) { - continue; - } - if (!rule.getSubjectRules().getDNMatchingRule().isMatchAll()) { - sa.getApproachableRules().add( - rule.getSubjectRules().getDNMatchingRule() - .toShortSlashSeparatedString()); - } - if (!rule.getSubjectRules().getVONameMatchingRule().isMatchAll()) { - sa.getApproachableRules().add( - "vo:" + rule.getSubjectRules().getVONameMatchingRule().getVOName()); - } - } - if (sa.getApproachableRules().size() == 0) { - sa.getApproachableRules().add("'ALL'"); - } - - return sa; - } -} \ No newline at end of file diff --git a/src/main/java/it/grid/storm/namespace/model/StorageClassType.java b/src/main/java/it/grid/storm/namespace/model/StorageClassType.java index ed1a87d5d..8fd7baff1 100644 --- a/src/main/java/it/grid/storm/namespace/model/StorageClassType.java +++ b/src/main/java/it/grid/storm/namespace/model/StorageClassType.java @@ -19,51 +19,7 @@ public enum StorageClassType { - T0D0("T0D0", "T0D0"), 
T0D1("T0D1", "T0D1"), T1D0("T1D0", "T1D0"), T1D1( - "T1D1", "T1D1"), UNKNOWN("UNKNOWN", "Storage Class Type UNKNOWN!"); - - private String storageClassTypeString; - private String stringSchema; - - private StorageClassType(String storageClassTypeString, String stringSchema) { - - this.storageClassTypeString = storageClassTypeString; - this.stringSchema = stringSchema; - - } - - /** - * - * @param storageClassTypeString - * String - * @return StorageClassType - */ - public static StorageClassType getStorageClassType( - String storageClassTypeString) { - - for (StorageClassType sct : StorageClassType.values()) { - if (sct.getStorageClassTypeString().equals(storageClassTypeString)) { - return sct; - } - } - - return UNKNOWN; - } - - /** - * Returns the String representation of this storage class type instance. - * - * @return the String representation of this storage class type instance. - */ - public String getStorageClassTypeString() { - - return storageClassTypeString; - } - - public String getStringSchema() { - - return stringSchema; - } + T0D1, T1D0, T1D1; public boolean isTapeEnabled() { @@ -73,10 +29,4 @@ public boolean isTapeEnabled() { return false; } - - // Only get method for Schema - public String toString() { - - return this.stringSchema; - } } diff --git a/src/main/java/it/grid/storm/namespace/model/TransportProtocol.java b/src/main/java/it/grid/storm/namespace/model/TransportProtocol.java index 8b27050ee..ed6a3a588 100644 --- a/src/main/java/it/grid/storm/namespace/model/TransportProtocol.java +++ b/src/main/java/it/grid/storm/namespace/model/TransportProtocol.java @@ -17,8 +17,6 @@ package it.grid.storm.namespace.model; -import it.grid.storm.namespace.naming.*; - public class TransportProtocol { private int protocolID = -1; @@ -62,13 +60,6 @@ public Authority getAuthority() { } } - public void setLocalAuthority() { - - if (!this.protocol.equals(Protocol.FILE)) { - this.service = new Authority(NamingConst.getServiceDefaultHost()); - } - } - public void 
setAuthority(Authority service) { this.service = service; diff --git a/src/main/java/it/grid/storm/namespace/model/VirtualFS.java b/src/main/java/it/grid/storm/namespace/model/VirtualFS.java index d577d0a47..46cd98685 100644 --- a/src/main/java/it/grid/storm/namespace/model/VirtualFS.java +++ b/src/main/java/it/grid/storm/namespace/model/VirtualFS.java @@ -23,11 +23,13 @@ import java.lang.reflect.InvocationTargetException; import java.net.URI; import java.net.URISyntaxException; -import java.util.ArrayList; import java.util.Hashtable; import java.util.List; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; import it.grid.storm.balancer.BalancingStrategy; import it.grid.storm.balancer.Node; @@ -52,13 +54,10 @@ import it.grid.storm.namespace.CapabilityInterface; import it.grid.storm.namespace.DefaultValuesInterface; import it.grid.storm.namespace.ExpiredSpaceTokenException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.PropertyInterface; import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.StoRIImpl; -import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.naming.NamespaceUtil; import it.grid.storm.namespace.naming.NamingConst; import it.grid.storm.persistence.exceptions.DataAccessException; @@ -71,20 +70,9 @@ import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TSpaceType; -/** - *

Title:

- * - *

Description:

- * - *

Copyright: Copyright (c) 2006

- * - *

Company: INFN-CNAF and ICTP/eGrid project

- * - * @author Riccardo Zappi @version 1.0 - */ -public class VirtualFS implements VirtualFSInterface { +public class VirtualFS { - private final Logger log = NamespaceDirector.getLogger(); + private final Logger log = LoggerFactory.getLogger(VirtualFS.class); String aliasName = null; String type = null; @@ -101,8 +89,8 @@ public class VirtualFS implements VirtualFSInterface { genericfs genericFS = null; SpaceSystem spaceSystem = null; FilesystemIF fsWrapper = null; - List mappingRules = new ArrayList(); - List approachableRules = new ArrayList(); + List mappingRules = Lists.newArrayList(); + List approachableRules = Lists.newArrayList(); Configuration config; StorageClassType storageClass = null; TSpaceToken spaceToken; @@ -125,18 +113,18 @@ public void setAliasName(String name) { public void setFSType(String type) { this.type = type; - initializeSpaceUpdaterHelper(); + // initializeSpaceUpdaterHelper(); } public void setFSDriver(Class fsDriver) throws NamespaceException { this.fsDriver = fsDriver; - this.genericFS = makeFSInstance(); + // this.genericFS = makeFSInstance(); - fsWrapper = RandomWaitFilesystemAdapter.maybeWrapFilesystem(fsWrapper); - this.fsWrapper = new MetricsFilesystemAdapter( - new Filesystem(getFSDriverInstance()), - METRIC_REGISTRY.getRegistry()); + // fsWrapper = RandomWaitFilesystemAdapter.maybeWrapFilesystem(fsWrapper); + // this.fsWrapper = new MetricsFilesystemAdapter( + // new Filesystem(getFSDriverInstance()), + // METRIC_REGISTRY.getRegistry()); } public void setSpaceTokenDescription(String spaceTokenDescription) { @@ -149,19 +137,17 @@ public void setStorageClassType(StorageClassType storageClass) { this.storageClass = storageClass; } - private void initializeSpaceUpdaterHelper() { + public void initializeSpaceUpdaterHelper() { spaceUpdater = SpaceUpdaterHelperFactory.getSpaceUpdaterHelper(this); } - @Override public void setProperties(PropertyInterface prop) { this.properties = prop; } - public void setSpaceSystemDriver(Class 
spaceDriver) - throws NamespaceException { + public void setSpaceSystemDriver(Class spaceDriver) throws NamespaceException { if (spaceDriver == null) { throw new NamespaceException("NULL space driver"); @@ -183,7 +169,6 @@ public void setCapabilities(CapabilityInterface cap) { public void setRoot(String rootPath) throws NamespaceException { this.rootPath = buildRootPath(rootPath); - buildStoRIRoot(rootPath); } public void addMappingRule(MappingRule mappingRule) { @@ -218,21 +203,11 @@ private String buildRootPath(String rootPath) throws NamespaceException { rootPathUri = new URI(rootPath); } catch (URISyntaxException e) { throw new NamespaceException( - "Unable to set rootPath. Invalid string. URISyntaxException: " - + e.getMessage()); + "Unable to set rootPath. Invalid string. URISyntaxException: " + e.getMessage()); } return rootPathUri.normalize().toString(); } - private void buildStoRIRoot(String rootPath) throws NamespaceException { - - /** - * @todo - */ - - // storiRoot = new StoRIImpl(this, rootPath, StoRIType.FOLDER); - } - /***************************************************************************** * READ METHODs ****************************************************************************/ @@ -261,11 +236,11 @@ public TSizeInBytes getUsedOnlineSpace() throws NamespaceException { TSizeInBytes result = TSizeInBytes.makeEmpty(); /** - * @todo : This method must contact Space Manager (or who for him) to - * retrieve the real situation + * @todo : This method must contact Space Manager (or who for him) to retrieve the real + * situation * - * @todo : Contact Space Catalog to retrieve the logical space occupied. - * This space must to be equal to space occupied in underlying FS. + * @todo : Contact Space Catalog to retrieve the logical space occupied. This space must to be + * equal to space occupied in underlying FS. 
*/ return result; } @@ -274,11 +249,11 @@ public TSizeInBytes getAvailableOnlineSpace() throws NamespaceException { TSizeInBytes result = TSizeInBytes.makeEmpty(); /** - * @todo : This method must contact Space Manager (or who for him) to - * retrieve the real situation + * @todo : This method must contact Space Manager (or who for him) to retrieve the real + * situation * - * @todo : Contact Space Catalog to retrieve the logical space occupied. - * This space must to be equal to space occupied in underlying FS. + * @todo : Contact Space Catalog to retrieve the logical space occupied. This space must to be + * equal to space occupied in underlying FS. */ return result; @@ -300,7 +275,6 @@ public String getAliasName() { return this.aliasName; } - @Override public boolean isHttpWorldReadable() { for (ApproachableRule rule : approachableRules) { @@ -328,18 +302,16 @@ public List getMappingRules() throws NamespaceException { if (this.mappingRules.isEmpty()) { throw new NamespaceException( - "No one MAPPING RULES bound with this VFS (" + aliasName + "). "); + "No one MAPPING RULES bound with this VFS (" + aliasName + "). "); } return this.mappingRules; } - @Override - public List getApproachableRules() - throws NamespaceException { + public List getApproachableRules() throws NamespaceException { if (this.approachableRules.isEmpty()) { throw new NamespaceException( - "No one APPROACHABLE RULES bound with this VFS (" + aliasName + "). "); + "No one APPROACHABLE RULES bound with this VFS (" + aliasName + "). 
"); } return this.approachableRules; } @@ -353,27 +325,23 @@ private genericfs makeFSInstance() throws NamespaceException { genericfs fs = null; if (fsDriver == null) { - throw new NamespaceException( - "Cannot build FS Driver istance without a valid Driver Class!"); + throw new NamespaceException("Cannot build FS Driver istance without a valid Driver Class!"); } Class fsArgumentsClass[] = new Class[1]; fsArgumentsClass[0] = String.class; - Object[] fsArguments = new Object[] { this.rootPath }; + Object[] fsArguments = new Object[] {this.rootPath}; Constructor fsConstructor = null; try { fsConstructor = fsDriver.getConstructor(fsArgumentsClass); } catch (SecurityException ex) { - log.error( - "Unable to retrieve the FS Driver Constructor. Security problem.", ex); + log.error("Unable to retrieve the FS Driver Constructor. Security problem.", ex); throw new NamespaceException( - "Unable to retrieve the FS Driver Constructor. Security problem.", ex); + "Unable to retrieve the FS Driver Constructor. Security problem.", ex); } catch (NoSuchMethodException ex) { - log.error( - "Unable to retrieve the FS Driver Constructor. Security problem.", ex); + log.error("Unable to retrieve the FS Driver Constructor. Security problem.", ex); throw new NamespaceException( - "Unable to retrieve the FS Driver Constructor. No such constructor.", - ex); + "Unable to retrieve the FS Driver Constructor. No such constructor.", ex); } try { fs = (genericfs) fsConstructor.newInstance(fsArguments); @@ -384,21 +352,17 @@ private genericfs makeFSInstance() throws NamespaceException { log.debug("VFS Ex Stack: "); ex1.printStackTrace(); - throw new NamespaceException("Unable to instantiate the FS Driver. ", - ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. ", ex1); } catch (IllegalArgumentException ex1) { - log.error("Unable to instantiate the FS Driver. Using wrong argument.", - ex1); - throw new NamespaceException( - "Unable to instantiate the FS Driver. 
Using wrong argument.", ex1); + log.error("Unable to instantiate the FS Driver. Using wrong argument.", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. Using wrong argument.", + ex1); } catch (IllegalAccessException ex1) { log.error("Unable to instantiate the FS Driver. Illegal Access.", ex1); - throw new NamespaceException( - "Unable to instantiate the FS Driver. Illegal Access.", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. Illegal Access.", ex1); } catch (InstantiationException ex1) { log.error("Unable to instantiate the FS Driver. Generic problem..", ex1); - throw new NamespaceException( - "Unable to instantiate the FS Driver. Generic problem..", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. Generic problem..", ex1); } return fs; @@ -411,9 +375,8 @@ public FilesystemIF getFilesystem() throws NamespaceException { FilesystemIF fs = new Filesystem(getFSDriverInstance()); fs = RandomWaitFilesystemAdapter.maybeWrapFilesystem(fs); - - fsWrapper = new MetricsFilesystemAdapter(fs, - METRIC_REGISTRY.getRegistry()); + + fsWrapper = new MetricsFilesystemAdapter(fs, METRIC_REGISTRY.getRegistry()); } return this.fsWrapper; @@ -443,71 +406,53 @@ private SpaceSystem makeSpaceSystemInstance() throws NamespaceException { if (spaceSystemDriver == null) { throw new NamespaceException( - "Cannot build Space Driver istance without a valid Driver Class!"); + "Cannot build Space Driver istance without a valid Driver Class!"); } // Check if SpaceSystem is GPFSSpaceSystem used for GPFS FS // Check if SpaceSystem is MockSpaceSystem used for Posix FS - if ((this.spaceSystemDriver.getName() - .equals(GPFSSpaceSystem.class.getName())) - || (this.spaceSystemDriver.getName() - .equals(MockSpaceSystem.class.getName()))) { + if ((this.spaceSystemDriver.getName().equals(GPFSSpaceSystem.class.getName())) + || (this.spaceSystemDriver.getName().equals(MockSpaceSystem.class.getName()))) { // The class type 
argument is the mount point of GPFS file system Class ssArgumentsClass[] = new Class[1]; ssArgumentsClass[0] = String.class; - Object[] ssArguments = new Object[] { this.rootPath }; + Object[] ssArguments = new Object[] {this.rootPath}; Constructor ssConstructor = null; try { ssConstructor = spaceSystemDriver.getConstructor(ssArgumentsClass); } catch (SecurityException ex) { - log.error( - "Unable to retrieve the FS Driver Constructor. Security problem.", - ex); + log.error("Unable to retrieve the FS Driver Constructor. Security problem.", ex); throw new NamespaceException( - "Unable to retrieve the FS Driver Constructor. Security problem.", - ex); + "Unable to retrieve the FS Driver Constructor. Security problem.", ex); } catch (NoSuchMethodException ex) { - log.error( - "Unable to retrieve the FS Driver Constructor. Security problem.", - ex); + log.error("Unable to retrieve the FS Driver Constructor. Security problem.", ex); throw new NamespaceException( - "Unable to retrieve the FS Driver Constructor. No such constructor.", - ex); + "Unable to retrieve the FS Driver Constructor. No such constructor.", ex); } try { ss = (SpaceSystem) ssConstructor.newInstance(ssArguments); } catch (InvocationTargetException ex1) { - log.error("Unable to instantiate the SpaceSystem Driver. Wrong target.", - ex1); - throw new NamespaceException("Unable to instantiate the FS Driver. ", - ex1); + log.error("Unable to instantiate the SpaceSystem Driver. Wrong target.", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. ", ex1); } catch (IllegalArgumentException ex1) { - log.error( - "Unable to instantiate the SpaceSystem Driver. Using wrong argument.", - ex1); - throw new NamespaceException( - "Unable to instantiate the FS Driver. Using wrong argument.", ex1); + log.error("Unable to instantiate the SpaceSystem Driver. Using wrong argument.", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. 
Using wrong argument.", + ex1); } catch (IllegalAccessException ex1) { - log.error( - "Unable to instantiate the SpaceSystem Driver. Illegal Access.", ex1); - throw new NamespaceException( - "Unable to instantiate the FS Driver. Illegal Access.", ex1); + log.error("Unable to instantiate the SpaceSystem Driver. Illegal Access.", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. Illegal Access.", ex1); } catch (InstantiationException ex1) { - log.error( - "Unable to instantiate the SpaceSystem Driver. Generic problem..", - ex1); - throw new NamespaceException( - "Unable to instantiate the FS Driver. Generic problem..", ex1); + log.error("Unable to instantiate the SpaceSystem Driver. Generic problem..", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. Generic problem..", ex1); } } else { log.error("None Space System Driver built"); /** - * @todo : Perhaps a "genericSpaceSystem" could be more disederable rather - * than NULL + * @todo : Perhaps a "genericSpaceSystem" could be more disederable rather than NULL */ ss = null; } @@ -549,7 +494,6 @@ public boolean isApproachableByUser(GridUserInterface user) { return false; } - @Override public boolean isApproachableByAnonymous() { for (ApproachableRule approachableRule : this.approachableRules) { @@ -567,7 +511,7 @@ public StoRI createFile(String relativePath) throws NamespaceException { */ StoRIType type = StoRIType.UNKNOWN; // log.debug("CREATING STORI BY RELATIVE PATH : "+relativePath); - StoRI stori = new StoRIImpl(this, mappingRules.get(0), relativePath, type); + StoRI stori = new StoRI(this, mappingRules.get(0), relativePath, type); return stori; } @@ -577,27 +521,25 @@ public StoRI createFile(String relativePath, StoRIType type) { * @todo Check if relativePath is a valid path for a file. 
*/ log.debug("VFS Class - Relative Path : " + relativePath); - StoRI stori = new StoRIImpl(this, mappingRules.get(0), relativePath, type); + StoRI stori = new StoRI(this, mappingRules.get(0), relativePath, type); return stori; } - @Override - public StoRI createFile(String relativePath, StoRIType type, MappingRule rule) - throws NamespaceException { + public StoRI createFile(String relativePath, StoRIType type, MappingRule rule) + throws NamespaceException { - return new StoRIImpl(this, rule, relativePath, type); - } + return new StoRI(this, rule, relativePath, type); + } /**************************************************************** * Methods used by StoRI to perform IMPLICIT SPACE RESERVATION *****************************************************************/ /** - * Workaround to manage the DEFAULT SPACE TOKEN defined per Storage Area. This - * workaround simply give the possibility to define a list of DEFAULT SPACE - * TOKENs by the StoRM configuration file. If the token specified into the - * PrepareToPut request belongs to the list of default space token, the space - * file is not used (since it does not exists into the space catalog) and a + * Workaround to manage the DEFAULT SPACE TOKEN defined per Storage Area. This workaround simply + * give the possibility to define a list of DEFAULT SPACE TOKENs by the StoRM configuration file. + * If the token specified into the PrepareToPut request belongs to the list of default space + * token, the space file is not used (since it does not exists into the space catalog) and a * simple allocation of blocks is performed for the file * * Return true if the space token specified is a DEAFULT SPACE TOKENS. 
@@ -606,25 +548,23 @@ public StoRI createFile(String relativePath, StoRIType type, MappingRule rule) private Boolean isVOSAToken(TSpaceToken token) throws NamespaceException { - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); + ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); StorageSpaceData ssd = null; try { ssd = catalog.getStorageSpace(token); } catch (TransferObjectDecodingException e) { log.error( - "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: " - + e.getMessage()); + "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: " + + e.getMessage()); throw new NamespaceException( - "Error retrieving Storage Area space information. TransferObjectDecodingException : " - + e.getMessage()); + "Error retrieving Storage Area space information. TransferObjectDecodingException : " + + e.getMessage()); } catch (DataAccessException e) { - log - .error("Unable to get StorageSpaceTO from the DB. DataAccessException: " - + e.getMessage()); + log.error("Unable to get StorageSpaceTO from the DB. DataAccessException: " + e.getMessage()); throw new NamespaceException( - "Error retrieving Storage Area space information. DataAccessException : " - + e.getMessage()); + "Error retrieving Storage Area space information. 
DataAccessException : " + + e.getMessage()); } if ((ssd != null) && (ssd.getSpaceType().equals(TSpaceType.VOSPACE))) { @@ -635,15 +575,13 @@ private Boolean isVOSAToken(TSpaceToken token) throws NamespaceException { } public void makeSilhouetteForFile(StoRI stori, TSizeInBytes presumedSize) - throws NamespaceException { + throws NamespaceException { // Check if StoRI is a file if (!(stori.getStoRIType().equals(StoRIType.FILE))) { - log.error("Unable to associate a Space to the StoRI with type: " - + stori.getStoRIType()); + log.error("Unable to associate a Space to the StoRI with type: " + stori.getStoRIType()); throw new NamespaceException( - "Unable to associate a Space to the StoRI with type: " - + stori.getStoRIType()); + "Unable to associate a Space to the StoRI with type: " + stori.getStoRIType()); } // Retrieve the instance of the right Space System @@ -654,52 +592,44 @@ public void makeSilhouetteForFile(StoRI stori, TSizeInBytes presumedSize) TSizeInBytes guarSize = defValue.getDefaultGuaranteedSpaceSize(); - // Space space = createSpace(guarSize, presumedSize, localFile, - // spaceSystem); - Space space = createSpace(presumedSize, presumedSize, localFile, - spaceSystem); + Space space = createSpace(presumedSize, presumedSize, localFile, spaceSystem); stori.setSpace(space); } /* - * THis method is synchronized to avoid multiple execution from different - * thread. In such condition, the SpaceData is token at the same time from - * both thread , and then modified and updated. This means that one of the two - * update will be overwritten from the other thread! + * THis method is synchronized to avoid multiple execution from different thread. In such + * condition, the SpaceData is token at the same time from both thread , and then modified and + * updated. This means that one of the two update will be overwritten from the other thread! 
*/ - public synchronized void useSpaceForFile(TSpaceToken token, StoRI file, - TSizeInBytes sizePresumed) + public synchronized void useSpaceForFile(TSpaceToken token, StoRI file, TSizeInBytes sizePresumed) throws NamespaceException, ExpiredSpaceTokenException { // Check if StoRI is a file if (!(file.getStoRIType().equals(StoRIType.FILE))) { - log.error("Unable to associate a Space to the StoRI with type: " - + file.getStoRIType()); + log.error("Unable to associate a Space to the StoRI with type: " + file.getStoRIType()); throw new NamespaceException( - "Unable to associate a Space to the StoRI with type: " - + file.getStoRIType()); + "Unable to associate a Space to the StoRI with type: " + file.getStoRIType()); } // Get the default space size TSizeInBytes defaultFileSize = null; try { - defaultFileSize = TSizeInBytes - .make(Configuration.getInstance().getFileDefaultSize(), SizeUnit.BYTES); + defaultFileSize = + TSizeInBytes.make(Configuration.getInstance().getFileDefaultSize(), SizeUnit.BYTES); } catch (it.grid.storm.srm.types.InvalidTSizeAttributesException e) { log.debug("Invalid size created."); } /** - * Verify if the token specified is a DEFAULT SPACE TOKENS used to identify - * the Storage Area + * Verify if the token specified is a DEFAULT SPACE TOKENS used to identify the Storage Area */ Boolean found = isVOSAToken(token); /** - * In case of DEFAULT SPACE TOKENspecified do nothing and create a simple - * silhouette for the file... + * In case of DEFAULT SPACE TOKENspecified do nothing and create a simple silhouette for the + * file... */ if (found) { @@ -716,32 +646,30 @@ public synchronized void useSpaceForFile(TSpaceToken token, StoRI file, } /** - * Token for Dynamic space reservation specified. Go ahead in the old way, - * look into the space reservation catalog, ... + * Token for Dynamic space reservation specified. Go ahead in the old way, look into the space + * reservation catalog, ... 
*/ // Use of Reserve Space Manager StorageSpaceData spaceData = null; try { - spaceData = new ReservedSpaceCatalog().getStorageSpace(token); + spaceData = ReservedSpaceCatalog.getInstance().getStorageSpace(token); } catch (TransferObjectDecodingException e) { log.error( - "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: " - + e.getMessage()); + "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: " + + e.getMessage()); throw new NamespaceException( - "Error retrieving Storage Area information from Token. TransferObjectDecodingException : " - + e.getMessage()); + "Error retrieving Storage Area information from Token. TransferObjectDecodingException : " + + e.getMessage()); } catch (DataAccessException e) { - log.error("Unable to build get StorageSpaceTO. DataAccessException: " - + e.getMessage()); + log.error("Unable to build get StorageSpaceTO. DataAccessException: " + e.getMessage()); throw new NamespaceException( - "Error retrieving Storage Area information from Token. DataAccessException : " - + e.getMessage()); + "Error retrieving Storage Area information from Token. 
DataAccessException : " + + e.getMessage()); } if (spaceData == null) { - throw new NamespaceException( - "No Storage Space stored with this token :" + token); + throw new NamespaceException("No Storage Space stored with this token :" + token); } // Check here if Space Reservation is expired @@ -801,44 +729,36 @@ public synchronized void useSpaceForFile(TSpaceToken token, StoRI file, // Create Space StoRI StoRI spaceFile = retrieveSpaceFileByPFN(pfn, totalSize); - if ((!(spaceFile.getLocalFile().exists())) - || (spaceFile.getLocalFile().isDirectory())) { + if ((!(spaceFile.getLocalFile().exists())) || (spaceFile.getLocalFile().isDirectory())) { log.error( - "Unable to get the correct space file!spaceFile does not exsists or it is a directory."); + "Unable to get the correct space file!spaceFile does not exsists or it is a directory."); return; } /** - * Splitting the Space File. In this first version the original space file - * is truncated at the original size minus the new ptp file size presumed, - * and a new space pre_allocation, bound with the new ptp file, is done. + * Splitting the Space File. In this first version the original space file is truncated at the + * original size minus the new ptp file size presumed, and a new space pre_allocation, bound + * with the new ptp file, is done. * - * @todo In the final version, if the new size requested is greater then - * the half of the original space file, the original spacefile is renamed - * to the desired ptp file name and then truncated to the requested size. - * A new space pre_allocation is perfored and bound with the old original - * space file name. + * @todo In the final version, if the new size requested is greater then the half of the + * original space file, the original spacefile is renamed to the desired ptp file name + * and then truncated to the requested size. A new space pre_allocation is perfored and + * bound with the old original space file name. 
* */ - TSizeInBytes returnedSize = splitSpace(spaceFile, file, - sizePresumed.value()); + TSizeInBytes returnedSize = splitSpace(spaceFile, file, sizePresumed.value()); spaceFile.setStoRIType(StoRIType.SPACE_BOUND); file.setSpace(spaceFile.getSpace()); - /** - * Log ANY data HERE - */ - // Update Storage Space to new values of size TSizeInBytes newUsedSpaceSize = TSizeInBytes.makeEmpty(); TSizeInBytes newAvailableSpaceSize = TSizeInBytes.makeEmpty(); try { - newUsedSpaceSize = TSizeInBytes - .make(totalSpaceSize.value() - remainingSize, SizeUnit.BYTES); - newAvailableSpaceSize = TSizeInBytes.make(remainingSize, - SizeUnit.BYTES); + newUsedSpaceSize = + TSizeInBytes.make(totalSpaceSize.value() - remainingSize, SizeUnit.BYTES); + newAvailableSpaceSize = TSizeInBytes.make(remainingSize, SizeUnit.BYTES); } catch (InvalidTSizeAttributesException ex) { log.error("Unable to create Used Space Size, so use EMPTY size ", ex); } @@ -860,31 +780,28 @@ public synchronized void useSpaceForFile(TSpaceToken token, StoRI file, } /* - * This mehod should be Synchronized? Yes...: From the last internal - * discussion we had, we decide that use the entire available space for a - * single PtP request is not the right behaviour. The correct behaviour is - * that, if the presumed size is not specified as input parameter in the PtP - * request, only a part of the available spacefile is used. The size is the - * minimum between the default file size for the StoRM configuration file and - * the half size of the available spaceFile. TODO + * This mehod should be Synchronized? Yes...: From the last internal discussion we had, we decide + * that use the entire available space for a single PtP request is not the right behaviour. The + * correct behaviour is that, if the presumed size is not specified as input parameter in the PtP + * request, only a part of the available spacefile is used. 
The size is the minimum between the + * default file size for the StoRM configuration file and the half size of the available + * spaceFile. TODO */ public synchronized void useAllSpaceForFile(TSpaceToken token, StoRI file) - throws NamespaceException, ExpiredSpaceTokenException { + throws NamespaceException, ExpiredSpaceTokenException { // Check if StoRI is a file if (!(file.getStoRIType().equals(StoRIType.FILE))) { - log.error("Unable to associate a Space to the StoRI with type: " - + file.getStoRIType()); + log.error("Unable to associate a Space to the StoRI with type: " + file.getStoRIType()); throw new NamespaceException( - "Unable to associate a Space to the StoRI with type: " - + file.getStoRIType()); + "Unable to associate a Space to the StoRI with type: " + file.getStoRIType()); } // Get the default space size TSizeInBytes defaultFileSize = null; try { - defaultFileSize = TSizeInBytes - .make(Configuration.getInstance().getFileDefaultSize(), SizeUnit.BYTES); + defaultFileSize = + TSizeInBytes.make(Configuration.getInstance().getFileDefaultSize(), SizeUnit.BYTES); } catch (it.grid.storm.srm.types.InvalidTSizeAttributesException e) { log.debug("Invalid size created."); } @@ -892,25 +809,23 @@ public synchronized void useAllSpaceForFile(TSpaceToken token, StoRI file) // Use of Reserve Space Manager StorageSpaceData spaceData = null; try { - spaceData = new ReservedSpaceCatalog().getStorageSpace(token); + spaceData = ReservedSpaceCatalog.getInstance().getStorageSpace(token); } catch (TransferObjectDecodingException e) { log.error( - "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: " - + e.getMessage()); + "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: " + + e.getMessage()); throw new NamespaceException( - "Error retrieving Storage Area information from Token. TransferObjectDecodingException : " - + e.getMessage()); + "Error retrieving Storage Area information from Token. 
TransferObjectDecodingException : " + + e.getMessage()); } catch (DataAccessException e) { - log.error("Unable to build get StorageSpaceTO. DataAccessException: " - + e.getMessage()); + log.error("Unable to build get StorageSpaceTO. DataAccessException: " + e.getMessage()); throw new NamespaceException( - "Error retrieving Storage Area information from Token. DataAccessException : " - + e.getMessage()); + "Error retrieving Storage Area information from Token. DataAccessException : " + + e.getMessage()); } if (spaceData == null) { - throw new NamespaceException( - "No Storage Space stored with this token :" + token); + throw new NamespaceException("No Storage Space stored with this token :" + token); } // Check here if Space Reservation is expired @@ -926,14 +841,13 @@ public synchronized void useAllSpaceForFile(TSpaceToken token, StoRI file) if (isVOSAToken(token)) { // ADD HERE THE LOGIC TO MANAGE DEFAULT SPACE RESERVATION /** - * Check if a DEFAULT SPACE TOKEN is specified. IN that case do nothing - * and create a simple silhouette for the file... + * Check if a DEFAULT SPACE TOKEN is specified. IN that case do nothing and create a simple + * silhouette for the file... * * - * TOREMOVE. The space data will contains this information!!! i METADATA - * non venfgono agrgiornati, sara fatta una funzionalita' nella - * getspacemetadatacatalog che in caso di query sul defaulr space token - * vada a vedre la quota sul file system. + * TOREMOVE. The space data will contains this information!!! i METADATA non venfgono + * agrgiornati, sara fatta una funzionalita' nella getspacemetadatacatalog che in caso di + * query sul defaulr space token vada a vedre la quota sul file system. * */ // WARNING, This double check have to be removed, the firs should be fdone @@ -951,8 +865,8 @@ public synchronized void useAllSpaceForFile(TSpaceToken token, StoRI file) } else { /** - * Token for Dynamic space reservation specified. 
Go ahead in the old way, - * look into the space reservation catalog, ... + * Token for Dynamic space reservation specified. Go ahead in the old way, look into the space + * reservation catalog, ... */ // Check here if Space Reservation is expired @@ -978,8 +892,7 @@ public synchronized void useAllSpaceForFile(TSpaceToken token, StoRI file) } else { TSizeInBytes fileSizeToUse = null; try { - fileSizeToUse = TSizeInBytes.make(availableSpaceSize.value() / 2, - SizeUnit.BYTES); + fileSizeToUse = TSizeInBytes.make(availableSpaceSize.value() / 2, SizeUnit.BYTES); } catch (it.grid.storm.srm.types.InvalidTSizeAttributesException e) { log.debug("Invalid size created."); } @@ -990,24 +903,19 @@ public synchronized void useAllSpaceForFile(TSpaceToken token, StoRI file) } - /** - * public void bindSpaceToFile(StoRI space, StoRI file) throws - * NamespaceException { file.setSpace(space.getSpace()); } - **/ - /**************************************************************** * Methods used by StoRI to perform EXPLICIT SPACE RESERVATION *****************************************************************/ - public StoRI createSpace(String relativePath, long guaranteedSize, - long totalSize) throws NamespaceException { + public StoRI createSpace(String relativePath, long guaranteedSize, long totalSize) + throws NamespaceException { StoRIType type = StoRIType.SPACE; /* - * TODO Mapping rule should be choosen from the appropriate app-rule - * presents in the namespace.xml file... + * TODO Mapping rule should be choosen from the appropriate app-rule presents in the + * namespace.xml file... 
*/ - StoRI stori = new StoRIImpl(this, mappingRules.get(0), relativePath, type); + StoRI stori = new StoRI(this, mappingRules.get(0), relativePath, type); // Retrieve the instance of the right Space System SpaceSystem spaceSystem = getSpaceSystemDriverInstance(); @@ -1028,40 +936,35 @@ public StoRI createSpace(String relativePath, long guaranteedSize, log.error("Unable to create Total Size, so use EMPTY size", ex2); } - Space space = createSpace(guarSize, totSize, stori.getLocalFile(), - spaceSystem); + Space space = createSpace(guarSize, totSize, stori.getLocalFile(), spaceSystem); stori.setSpace(space); return stori; } - public StoRI createSpace(String relativePath, long totalsize) - throws NamespaceException { + public StoRI createSpace(String relativePath, long totalsize) throws NamespaceException { StoRI stori = createSpace(relativePath, totalsize, totalsize); return stori; } /** - * This method is used to split the specified spaceFile to the desired PtP - * file. The operations performed depends on the input parameters. If the - * desired new size is minor then the half of the total reserved space size, - * the original space file is truncated to new size : (original size - new PtP - * file presumed size), then a new space_preallocation, of the new PtP file + * This method is used to split the specified spaceFile to the desired PtP file. The operations + * performed depends on the input parameters. If the desired new size is minor then the half of + * the total reserved space size, the original space file is truncated to new size : (original + * size - new PtP file presumed size), then a new space_preallocation, of the new PtP file * presumed size, is bound to the requested file. * - * If the presumed size is greater then the half fo the global space - * available, the original space file is renamed to the new PtP file and - * truncated to the presumed size. 
A new space_preallocation is done to - * recreate the remaining original space file + * If the presumed size is greater then the half fo the global space available, the original space + * file is renamed to the new PtP file and truncated to the presumed size. A new + * space_preallocation is done to recreate the remaining original space file * - * @param spaceOrig StoRI bounds to the original space file. @param file StoRI - * bounds to the desired new PtP file. @param long new PtP file size - * presumed. @returns new Size + * @param spaceOrig StoRI bounds to the original space file. @param file StoRI bounds to the + * desired new PtP file. @param long new PtP file size presumed. @returns new Size */ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) - throws NamespaceException { + throws NamespaceException { // Update Storage Space to new values of size TSizeInBytes newSize = TSizeInBytes.makeEmpty(); @@ -1069,13 +972,11 @@ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) // Save the name of the current Space File String spacePFN = spaceOrig.getAbsolutePath(); log.debug("VFS Split: spaceFileName:" + spacePFN); - String relativeSpacePFN = NamespaceUtil - .extractRelativePath(this.getRootPath(), spacePFN); + String relativeSpacePFN = NamespaceUtil.extractRelativePath(this.getRootPath(), spacePFN); /** * extractRelativePath seems not working in this case! WHY? * - * @todo Because the mapping rule choosen is always the same, for all - * StFNRoot...BUG to FIX.. + * @todo Because the mapping rule choosen is always the same, for all StFNRoot...BUG to FIX.. * */ log.debug("Looking for root:" + this.getRootPath()); @@ -1091,12 +992,12 @@ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) log.debug("VFS Split: relativeSpacePFN:" + relativeSpacePFN); if (failure) { - log.warn( - "SpacePFN does not refer to this VFS root! 
Something goes wrong in app-rule?"); + log.warn("SpacePFN does not refer to this VFS root! Something goes wrong in app-rule?"); try { newSize = TSizeInBytes.make(sizePresumed, SizeUnit.BYTES); - file = createSpace(NamespaceUtil.extractRelativePath(this.getRootPath(), - file.getAbsolutePath()), sizePresumed); + file = createSpace( + NamespaceUtil.extractRelativePath(this.getRootPath(), file.getAbsolutePath()), + sizePresumed); file.getSpace().allot(); } catch (InvalidTSizeAttributesException ex) { log.error("Unable to create UNUsed Space Size, so use EMPTY size ", ex); @@ -1110,13 +1011,12 @@ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) long realSize = spaceOrig.getLocalFile().getSize(); /** - * The next steps depends on the input parameters. Case (1) : new PtP file - * size minor than the half of the available space file. In this case the - * spaceFile is truncated, and a new file is created with the desired - * amount of preallocated blocks. Case(2) : new PtP file size greater than - * the half of the available space file. The spaceFile is renamed to the - * new PtP file, truncated to the presumed size and a new preallocation is - * done bound to the original space file name. + * The next steps depends on the input parameters. Case (1) : new PtP file size minor than the + * half of the available space file. In this case the spaceFile is truncated, and a new file + * is created with the desired amount of preallocated blocks. Case(2) : new PtP file size + * greater than the half of the available space file. The spaceFile is renamed to the new PtP + * file, truncated to the presumed size and a new preallocation is done bound to the original + * space file name. 
* */ @@ -1124,20 +1024,19 @@ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) log.debug("SplitSpace Case (1)"); // Truncate - log.debug("SplitSpace: " + spaceOrig.getAbsolutePath() - + " truncating file to size:" + (realSize - sizePresumed)); - spaceOrig.getSpace().getSpaceFile() - .truncateFile((realSize - sizePresumed)); + log.debug("SplitSpace: " + spaceOrig.getAbsolutePath() + " truncating file to size:" + + (realSize - sizePresumed)); + spaceOrig.getSpace().getSpaceFile().truncateFile((realSize - sizePresumed)); // Allocate space for file try { newSize = TSizeInBytes.make(sizePresumed, SizeUnit.BYTES); - file = createSpace(NamespaceUtil.extractRelativePath( - this.getRootPath(), file.getAbsolutePath()), sizePresumed); + file = createSpace( + NamespaceUtil.extractRelativePath(this.getRootPath(), file.getAbsolutePath()), + sizePresumed); file.getSpace().allot(); } catch (InvalidTSizeAttributesException ex) { - log.error("Unable to create UNUsed Space Size, so use EMPTY size ", - ex); + log.error("Unable to create UNUsed Space Size, so use EMPTY size ", ex); } catch (it.grid.storm.filesystem.ReservationException e2) { log.error("Unable to create space into File System"); } @@ -1161,14 +1060,12 @@ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) newSize = TSizeInBytes.make(remainingSize, SizeUnit.BYTES); // Create a new Space file with the old name and with the size // computed. 
- spaceOrig = createSpace( - NamespaceUtil.extractRelativePath(this.getRootPath(), spacePFN), - newSize.value()); + spaceOrig = createSpace(NamespaceUtil.extractRelativePath(this.getRootPath(), spacePFN), + newSize.value()); // Create the new SpaceFile into the file system spaceOrig.getSpace().allot(); } catch (InvalidTSizeAttributesException ex) { - log.error("Unable to create UNUsed Space Size, so use EMPTY size ", - ex); + log.error("Unable to create UNUsed Space Size, so use EMPTY size ", ex); } catch (it.grid.storm.filesystem.ReservationException e2) { log.error("Unable to create space into File System"); } @@ -1184,8 +1081,7 @@ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) * Methods used by Space Reservation Manager *************************************************/ - public StoRI createSpace(long guarSize, long totalSize) - throws NamespaceException { + public StoRI createSpace(long guarSize, long totalSize) throws NamespaceException { // retrieve SPACE FILE NAME String relativePath = makeSpaceFilePath(); @@ -1215,8 +1111,7 @@ public StoRI createSpace() throws NamespaceException { TSizeInBytes guarSize = defValue.getDefaultGuaranteedSpaceSize(); // retrieve DEFAULT TOTAL size TSizeInBytes totalSize = defValue.getDefaultTotalSpaceSize(); - StoRI stori = createSpace(relativePath, guarSize.value(), - totalSize.value()); + StoRI stori = createSpace(relativePath, guarSize.value(), totalSize.value()); return stori; } @@ -1237,8 +1132,7 @@ public String toString() { sb.append(" VFS Name : '" + this.aliasName + "'" + sep); sb.append(" VFS root : '" + this.rootPath + "'" + sep); sb.append(" VFS FS driver : '" + this.fsDriver.getName() + "'" + sep); - sb.append( - " VFS Space driver : '" + this.spaceSystemDriver.getName() + "'" + sep); + sb.append(" VFS Space driver : '" + this.spaceSystemDriver.getName() + "'" + sep); sb.append(" -- DEFAULT VALUES --" + sep); sb.append(this.defValue); sb.append(" -- CAPABILITY --" + sep); @@ -1265,50 
+1159,31 @@ private String makeSpaceFilePath() throws NamespaceException { return result; } - /** - * private SpaceSystem retrieveSpaceSystem() throws NamespaceException { - * SpaceSystem ss = null; try { ss = (SpaceSystem) - * (this.getSpaceSystemDriver()).newInstance(); } catch (NamespaceException - * ex) { log.error("Error while retrieving Space System Driver for VFS :" + - * this.aliasName, ex); throw new NamespaceException("Error while retrieving - * Space System Driver for VFS :" + this.aliasName, ex); } catch - * (IllegalAccessException ex) { log.error("Error while accessing Space System - * driver for VFS :" + this.aliasName, ex); throw new - * NamespaceException("Error while accessing Space System driver for VFS :" + - * this.aliasName, ex); } catch (InstantiationException ex) { log.error("Error - * while instancianging Space System driver for VFS :" + this.aliasName, ex); - * throw new NamespaceException( "Error while instancianging Space System - * driver for VFS :" + this.aliasName, ex); } return ss; } - **/ - - private Space createSpace(TSizeInBytes guarSize, TSizeInBytes totalSize, - LocalFile file, SpaceSystem spaceSystem) throws NamespaceException { + private Space createSpace(TSizeInBytes guarSize, TSizeInBytes totalSize, LocalFile file, + SpaceSystem spaceSystem) throws NamespaceException { Space space = null; try { space = new Space(guarSize, totalSize, file, spaceSystem); } catch (InvalidSpaceAttributesException ex3) { log.error("Error while retrieving Space System Driver for VFS ", ex3); - throw new NamespaceException( - "Error while retrieving Space System Driver for VFS ", ex3); + throw new NamespaceException("Error while retrieving Space System Driver for VFS ", ex3); } return space; } - public StorageSpaceData getSpaceByAlias(String desc) - throws NamespaceException { + public StorageSpaceData getSpaceByAlias(String desc) throws NamespaceException { // Retrieve Storage Space from Persistence - ReservedSpaceCatalog catalog = new 
ReservedSpaceCatalog(); + ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(desc); return spaceData; } - public void storeSpaceByToken(StorageSpaceData spaceData) - throws NamespaceException { + public void storeSpaceByToken(StorageSpaceData spaceData) throws NamespaceException { // Retrieve Storage Space from Persistence - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); + ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); try { catalog.updateStorageSpace(spaceData); } catch (DataAccessException e) { @@ -1316,11 +1191,9 @@ public void storeSpaceByToken(StorageSpaceData spaceData) } } - public StoRI retrieveSpaceFileByPFN(PFN pfn, long totalSize) - throws NamespaceException { + public StoRI retrieveSpaceFileByPFN(PFN pfn, long totalSize) throws NamespaceException { - NamespaceInterface namespace = NamespaceDirector.getNamespace(); - StoRI stori = namespace.resolveStoRIbyPFN(pfn); + StoRI stori = Namespace.getInstance().resolveStoRIbyPFN(pfn); stori.setStoRIType(StoRIType.SPACE); // Create the Space istance log.debug("VFS: retrieveSpace, relative {}-{}", stori.getRelativePath(), stori); @@ -1344,8 +1217,7 @@ public TSpaceToken getSpaceToken() { return this.spaceToken; } - public BalancingStrategy getProtocolBalancingStrategy( - Protocol protocol) { + public BalancingStrategy getProtocolBalancingStrategy(Protocol protocol) { return this.capabilities.getBalancingStrategyByScheme(protocol); } @@ -1374,18 +1246,15 @@ public String getStorageAreaAuthzFixed() throws NamespaceException { if (getStorageAreaAuthzType().equals(SAAuthzType.FIXED)) { return saAuthzSourceName; } else { - throw new NamespaceException( - "Required FIXED-AUTHZ, but it is UNDEFINED."); + throw new NamespaceException("Required FIXED-AUTHZ, but it is UNDEFINED."); } } - public boolean increaseUsedSpace(long size) { - - return spaceUpdater.increaseUsedSpace(this, size); - } - - public boolean 
decreaseUsedSpace(long size) { + public SpaceUpdaterHelperInterface getSpaceUpdater() { - return spaceUpdater.decreaseUsedSpace(this, size); + if (spaceUpdater == null) { + spaceUpdater = SpaceUpdaterHelperFactory.getSpaceUpdaterHelper(this); + } + return spaceUpdater; } } diff --git a/src/main/java/it/grid/storm/namespace/naming/NamespaceUtil.java b/src/main/java/it/grid/storm/namespace/naming/NamespaceUtil.java index f25e0cd6f..8db9e9768 100644 --- a/src/main/java/it/grid/storm/namespace/naming/NamespaceUtil.java +++ b/src/main/java/it/grid/storm/namespace/naming/NamespaceUtil.java @@ -29,563 +29,563 @@ import com.google.common.collect.Lists; import it.grid.storm.griduser.VONameMatchingRule; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.model.MappingRule; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.srm.types.TSURL; public class NamespaceUtil { - private static final Logger log = LoggerFactory.getLogger(NamespaceUtil.class); - - /** - * PRIVATE Constructor - */ - private NamespaceUtil() { - - } - - /** - * Compute the distance between two path. Return -1 when the two path are different completely. 
- * - * @param path1 String - * @param path2 String - * @return int - */ - public static int computeDistanceFromPath(String path1, String path2) { - - return (new Path(path1)).distance(new Path(path2)); - } - - /** - * Retrieve all path elements within path - * - * @param path String - * @return Collection - */ - public static List getPathElement(String path) { - - return (new Path(path)).getPathElements(); - } - - /** - * getFileName - * - * @param stfn String - * @return String - */ - public static String getFileName(String stfn) { - - if (stfn != null) { - if (stfn.endsWith(NamingConst.SEPARATOR)) { - return ""; - } else { - Path path = new Path(stfn); - int length = path.getLength(); - if (length > 0) { - PathElement elem = path.getElementAt(length - 1); - return elem.toString(); - } else { - return ""; - } - } - } else { - return ""; - } - } - - /** - * Return all the VFS residing on a specified path (mount-point) - * - * @param mountPointPath - * @return the set - */ - public static Collection getResidentVFS(String mountPointPath) { - - List vfsSet = NamespaceDirector.getNamespace().getAllDefinedVFS(); - for (VirtualFSInterface vfs : vfsSet) { - String vfsRootPath; - boolean enclosed; - - vfsRootPath = vfs.getRootPath(); - enclosed = NamespaceUtil.isEnclosed(mountPointPath, vfsRootPath); - if (!enclosed) { - vfsSet.remove(vfs); - } - } - return vfsSet; - } - - public static String consumeFileName(String file) { - - if (file != null) { - if (file.endsWith(NamingConst.SEPARATOR)) { - return file; - } else { - Path path = new Path(file); - int length = path.getLength(); - if (length > 1) { - return path.getSubPath(length - 1).getPath() + NamingConst.SEPARATOR; - } else { - return Path.PATH_SEPARATOR; - } - } - } else { - return Path.PATH_SEPARATOR; - } - } - - /** - * get - * - * @param stfn String - * @return String - */ - public static String getStFNPath(String stfn) { - - return consumeFileName(stfn); - } - - public static String consumeElement(String 
stfnPath) { - - Path path = new Path(stfnPath); - int length = path.getLength(); - if (length > 1) { - return path.getSubPath(length - 1).getPath() + NamingConst.SEPARATOR; - } else { - return ""; - } - } - - public static String extractRelativePath(String root, String absolute) { - - if (absolute.startsWith(root)) { - Path rootPath = new Path(root); - int rootLength = rootPath.getLength(); - - Path absPath = new Path(absolute); - List elem = Lists.newArrayList(); - - for (int i = 0; i < absPath.getLength(); i++) { - // Why use length and not compare single element? - if (i >= rootLength) { - elem.add(absPath.getElementAt(i)); - } - } - Path result = new Path(elem, false); - - return result.getPath(); - } else { - return absolute; - } - } - - /** - * Is the first path within the second one? - * - * @param root - * @param wrapperCandidate - * @return - */ - public static boolean isEnclosed(String root, String wrapperCandidate) { - - boolean result = false; - Path rootPath = new Path(root); - Path wrapperPath = new Path(wrapperCandidate); - result = rootPath.isEnclosed(wrapperPath); - return result; - } - - /** - * - * @param stfnPath - * @param vfsApproachable - * @return the mapped rule or null if not found - */ - public static MappingRule getWinnerRule(String stfnPath, Collection mappingRules, - Collection vfsApproachable) { - - Preconditions.checkNotNull(stfnPath, "Unable to get winning rule: invalid null stfnPath"); - Preconditions.checkNotNull(mappingRules, - "Unable to get winning rule: invalid null mapping rules"); - Preconditions.checkNotNull(vfsApproachable, - "Unable to get winning rule: invalid null VFS list"); - - if (mappingRules.isEmpty()) { - log.warn("Unable to get winning rule: empty mapping rules"); - return null; - } - - if (vfsApproachable.isEmpty()) { - log.debug("Unable to get winning rule: empty VFS list"); - return null; - } - - log.debug("Searching winner rule for {}", stfnPath); - MappingRule winnerRule = null; - - Vector rules = new 
Vector(mappingRules); - - int minDistance = Integer.MAX_VALUE; - for (MappingRule rule : rules) { - if (isEnclosed(rule.getStFNRoot(), stfnPath) - && vfsApproachable.contains(rule.getMappedFS())) { - int distance = computeDistanceFromPath(rule.getStFNRoot(), stfnPath); - if (distance < minDistance) { - minDistance = distance; - winnerRule = rule; - } - } - } - return winnerRule; - } - - public static MappingRule getWinnerRule(TSURL surl, Collection mappingRules, - Collection vfsApproachable) { - - return getWinnerRule(surl.sfn().stfn().toString(), mappingRules, vfsApproachable); - } - - public static VirtualFSInterface getWinnerVFS(String absolutePath, - Map vfsListByRootPath) throws NamespaceException { - - VirtualFSInterface vfsWinner = null; - int distance = Integer.MAX_VALUE; - for (String vfsRoot : vfsListByRootPath.keySet()) { - int d = computeDistanceFromPath(vfsRoot, absolutePath); - log.debug("Pondering VFS Root '{}' against '{}'. Distance = {}", vfsRoot, absolutePath, d); - if (d < distance) { - boolean enclosed = isEnclosed(vfsRoot, absolutePath); - if (enclosed) { - distance = d; - vfsWinner = vfsListByRootPath.get(vfsRoot); - log.debug("Partial winner is {} (VFS: {})", vfsRoot, vfsWinner.getAliasName()); - } - } - } - if (vfsWinner == null) { - log.error("Unable to found a VFS compatible with path: '{}'", absolutePath); - throw new NamespaceException( - "Unable to found a VFS compatible with path :'" + absolutePath + "'"); - } - return vfsWinner; - } - - public static String resolveVOName(String filename, - Map vfsListByRootPath) throws NamespaceException { - - VirtualFSInterface vfs = getWinnerVFS(filename, vfsListByRootPath); - /* NamespaceException raised if vfs is not found => vfs is not null */ - VONameMatchingRule rule = - vfs.getApproachableRules().get(0).getSubjectRules().getVONameMatchingRule(); - return rule.getVOName(); - } - - /** - * ===================== INNER CLASSES ====================== - */ - - /** - * - *

- * Title: - *

- * - *

- * Description: - *

- * - */ - static class PathElement { - - private final String pathChunk; - - public PathElement(String path) { - - this.pathChunk = path; - } - - public String getPathChunk() { - - return this.pathChunk; - } - - @Override - public int hashCode() { - - return this.pathChunk.hashCode(); - } - - @Override - public boolean equals(Object obj) { - - boolean result = true; - if (!(obj instanceof PathElement)) { - result = false; - } else { - PathElement other = (PathElement) obj; - result = (this.getPathChunk()).equals(other.getPathChunk()); - } - return result; - } - - @Override - public String toString() { - - return pathChunk; - } - } - - /** - * - *

- * Title: - *

- * - *

- * Description: - *

- * - */ - private static class Path { - - private List path; - private static String PATH_SEPARATOR = "/"; - public static final String[] EMPTY_STRING_ARRAY = {}; - public boolean directory; - public boolean absolutePath; - - public Path() { - - this.path = Lists.newArrayList(); - this.directory = false; - this.absolutePath = true; - } - - public Path(List path, boolean absolutePath) { - - this.path = path; - this.directory = false; - this.absolutePath = absolutePath; - } - - public Path(String path) { - - // Factorize path into array of PathElement... - if (path.startsWith(PATH_SEPARATOR)) { - this.absolutePath = true; - } else { - this.absolutePath = false; - } - if (path.endsWith(PATH_SEPARATOR)) { - this.directory = true; - } else { - this.directory = false; - } - - String[] pathElements = factorizePath(path); - if (pathElements != null) { - // ...and build Path - this.path = Lists.newArrayList(); - for (String pathElement : pathElements) { - addPathElement(new PathElement(pathElement)); - } - } - } - - public String[] factorizePath(String path) { - - return toStringArray(path, PATH_SEPARATOR); - } - - public List getPathElements() { - - List result = Lists.newArrayList(); - Iterator scan = path.iterator(); - while (scan.hasNext()) { - PathElement p = scan.next(); - result.add(p.toString()); - } - return result; - } - - private String[] toStringArray(String value, String delim) { - - if (value != null) { - return split(delim, value); - } else { - return EMPTY_STRING_ARRAY; - } - } - - private String[] split(String seperators, String list) { - - return split(seperators, list, false); - } - - private String[] split(String seperators, String list, boolean include) { - - StringTokenizer tokens = new StringTokenizer(list, seperators, include); - String[] result = new String[tokens.countTokens()]; - int i = 0; - while (tokens.hasMoreTokens()) { - result[i++] = tokens.nextToken(); - } - return result; - } - - public String getPath() { - - StringBuilder buf = new 
StringBuilder(); - if (this.absolutePath) { - buf.append(PATH_SEPARATOR); - } - for (Iterator iter = path.iterator(); iter.hasNext();) { - PathElement item = iter.next(); - buf.append(item.getPathChunk()); - if (iter.hasNext()) { - buf.append(PATH_SEPARATOR); - } - } - if (this.directory) { - buf.append(PATH_SEPARATOR); - } - return buf.toString(); - } - - public int getLength() { - - if (path != null) { - return path.size(); - } else { - return 0; - } - } - - /** - * - * @param position int - * @return PathElement - */ - public PathElement getElementAt(int position) { - - if (position < getLength()) { - return this.path.get(position); - } else { - return null; - } - } - - /** - * - * @param obj Object - * @return boolean - */ - @Override - public boolean equals(Object obj) { - - boolean result = true; - if (!(obj instanceof Path)) { - result = false; - } else { - Path other = (Path) obj; - if (other.getLength() != this.getLength()) { - result = false; - } else { - int size = this.getLength(); - for (int i = 0; i < size; i++) { - if (!(this.getElementAt(i)).equals(other.getElementAt(i))) { - result = false; - break; - } - } - } - } - return result; - } - - /** - * - * @param pathChunk PathElement - */ - public void addPathElement(PathElement pathChunk) { - - this.path.add(pathChunk); - } - - /** - * - * @param elements int - * @return Path - */ - public Path getSubPath(int elements) { - - Path result = new Path(); - for (int i = 0; i < elements; i++) { - result.addPathElement(this.getElementAt(i)); - } - return result; - } - - /** - * - * @param wrapperCandidate Path - * @return boolean - */ - public boolean isEnclosed(Path wrapperCandidate) { - - boolean result = false; - if (this.getLength() > wrapperCandidate.getLength()) { - result = false; - } else { - Path other = wrapperCandidate.getSubPath(this.getLength()); - result = other.equals(this); - } - return result; - } - - /** - * - * @param other Path - * @return int - */ - public int distance(Path other) { - - 
int result = -1; - Path a; - Path b; - if (this.getLength() > other.getLength()) { - a = this; - b = other; - } else { - a = other; - b = this; - } - if (b.isEnclosed(a)) { - result = (a.getLength() - b.getLength()); - } else { - result = a.getLength() + b.getLength(); - } - return result; - } - - /** - * - * @return String - */ - @Override - public String toString() { - - StringBuilder buf = new StringBuilder(); - buf.append("["); - for (int i = 0; i < this.getLength(); i++) { - buf.append(" "); - buf.append(this.getElementAt(i).getPathChunk()); - } - buf.append(" ]"); - return buf.toString(); - } - } + private static final Logger log = LoggerFactory.getLogger(NamespaceUtil.class); + + /** + * PRIVATE Constructor + */ + private NamespaceUtil() { + + } + + /** + * Compute the distance between two path. Return -1 when the two path are different completely. + * + * @param path1 String + * @param path2 String + * @return int + */ + public static int computeDistanceFromPath(String path1, String path2) { + + return (new Path(path1)).distance(new Path(path2)); + } + + /** + * Retrieve all path elements within path + * + * @param path String + * @return Collection + */ + public static List getPathElement(String path) { + + return (new Path(path)).getPathElements(); + } + + /** + * getFileName + * + * @param stfn String + * @return String + */ + public static String getFileName(String stfn) { + + if (stfn != null) { + if (stfn.endsWith(NamingConst.SEPARATOR)) { + return ""; + } else { + Path path = new Path(stfn); + int length = path.getLength(); + if (length > 0) { + PathElement elem = path.getElementAt(length - 1); + return elem.toString(); + } else { + return ""; + } + } + } else { + return ""; + } + } + + /** + * Return all the VFS residing on a specified path (mount-point) + * + * @param mountPointPath + * @return the set + */ + public static Collection getResidentVFS(String mountPointPath) { + + List vfsSet = Namespace.getInstance().getAllDefinedVFS(); + for 
(VirtualFS vfs : vfsSet) { + String vfsRootPath; + boolean enclosed; + + vfsRootPath = vfs.getRootPath(); + enclosed = NamespaceUtil.isEnclosed(mountPointPath, vfsRootPath); + if (!enclosed) { + vfsSet.remove(vfs); + } + } + return vfsSet; + } + + public static String consumeFileName(String file) { + + if (file != null) { + if (file.endsWith(NamingConst.SEPARATOR)) { + return file; + } else { + Path path = new Path(file); + int length = path.getLength(); + if (length > 1) { + return path.getSubPath(length - 1).getPath() + NamingConst.SEPARATOR; + } else { + return Path.PATH_SEPARATOR; + } + } + } else { + return Path.PATH_SEPARATOR; + } + } + + /** + * get + * + * @param stfn String + * @return String + */ + public static String getStFNPath(String stfn) { + + return consumeFileName(stfn); + } + + public static String consumeElement(String stfnPath) { + + Path path = new Path(stfnPath); + int length = path.getLength(); + if (length > 1) { + return path.getSubPath(length - 1).getPath() + NamingConst.SEPARATOR; + } else { + return ""; + } + } + + public static String extractRelativePath(String root, String absolute) { + + if (absolute.startsWith(root)) { + Path rootPath = new Path(root); + int rootLength = rootPath.getLength(); + + Path absPath = new Path(absolute); + List elem = Lists.newArrayList(); + + for (int i = 0; i < absPath.getLength(); i++) { + // Why use length and not compare single element? + if (i >= rootLength) { + elem.add(absPath.getElementAt(i)); + } + } + Path result = new Path(elem, false); + + return result.getPath(); + } else { + return absolute; + } + } + + /** + * Is the first path within the second one? 
+ * + * @param root + * @param wrapperCandidate + * @return + */ + public static boolean isEnclosed(String root, String wrapperCandidate) { + + boolean result = false; + Path rootPath = new Path(root); + Path wrapperPath = new Path(wrapperCandidate); + result = rootPath.isEnclosed(wrapperPath); + return result; + } + + /** + * + * @param stfnPath + * @param vfsApproachable + * @return the mapped rule or null if not found + */ + public static MappingRule getWinnerRule(String stfnPath, Collection mappingRules, + Collection vfsApproachable) { + + Preconditions.checkNotNull(stfnPath, "Unable to get winning rule: invalid null stfnPath"); + Preconditions.checkNotNull(mappingRules, + "Unable to get winning rule: invalid null mapping rules"); + Preconditions.checkNotNull(vfsApproachable, + "Unable to get winning rule: invalid null VFS list"); + + if (mappingRules.isEmpty()) { + log.warn("Unable to get winning rule: empty mapping rules"); + return null; + } + + if (vfsApproachable.isEmpty()) { + log.debug("Unable to get winning rule: empty VFS list"); + return null; + } + + log.debug("Searching winner rule for {}", stfnPath); + MappingRule winnerRule = null; + + Vector rules = new Vector(mappingRules); + + int minDistance = Integer.MAX_VALUE; + for (MappingRule rule : rules) { + if (isEnclosed(rule.getStFNRoot(), stfnPath) + && vfsApproachable.contains(rule.getMappedFS())) { + int distance = computeDistanceFromPath(rule.getStFNRoot(), stfnPath); + if (distance < minDistance) { + minDistance = distance; + winnerRule = rule; + } + } + } + return winnerRule; + } + + public static MappingRule getWinnerRule(TSURL surl, Collection mappingRules, + Collection vfsApproachable) { + + return getWinnerRule(surl.sfn().stfn().toString(), mappingRules, vfsApproachable); + } + + public static VirtualFS getWinnerVFS(String absolutePath, + Map vfsListByRootPath) throws NamespaceException { + + VirtualFS vfsWinner = null; + int distance = Integer.MAX_VALUE; + for (String vfsRoot : 
vfsListByRootPath.keySet()) { + int d = computeDistanceFromPath(vfsRoot, absolutePath); + log.debug("Pondering VFS Root '{}' against '{}'. Distance = {}", vfsRoot, absolutePath, d); + if (d < distance) { + boolean enclosed = isEnclosed(vfsRoot, absolutePath); + if (enclosed) { + distance = d; + vfsWinner = vfsListByRootPath.get(vfsRoot); + log.debug("Partial winner is {} (VFS: {})", vfsRoot, vfsWinner.getAliasName()); + } + } + } + if (vfsWinner == null) { + log.error("Unable to found a VFS compatible with path: '{}'", absolutePath); + throw new NamespaceException( + "Unable to found a VFS compatible with path :'" + absolutePath + "'"); + } + return vfsWinner; + } + + public static String resolveVOName(String filename, Map vfsListByRootPath) + throws NamespaceException { + + VirtualFS vfs = getWinnerVFS(filename, vfsListByRootPath); + /* NamespaceException raised if vfs is not found => vfs is not null */ + VONameMatchingRule rule = + vfs.getApproachableRules().get(0).getSubjectRules().getVONameMatchingRule(); + return rule.getVOName(); + } + + /** + * ===================== INNER CLASSES ====================== + */ + + /** + * + *

+ * Title: + *

+ * + *

+ * Description: + *

+ * + */ + static class PathElement { + + private final String pathChunk; + + public PathElement(String path) { + + this.pathChunk = path; + } + + public String getPathChunk() { + + return this.pathChunk; + } + + @Override + public int hashCode() { + + return this.pathChunk.hashCode(); + } + + @Override + public boolean equals(Object obj) { + + boolean result = true; + if (!(obj instanceof PathElement)) { + result = false; + } else { + PathElement other = (PathElement) obj; + result = (this.getPathChunk()).equals(other.getPathChunk()); + } + return result; + } + + @Override + public String toString() { + + return pathChunk; + } + } + + /** + * + *

+ * Title: + *

+ * + *

+ * Description: + *

+ * + */ + private static class Path { + + private List path; + private static String PATH_SEPARATOR = "/"; + public static final String[] EMPTY_STRING_ARRAY = {}; + public boolean directory; + public boolean absolutePath; + + public Path() { + + this.path = Lists.newArrayList(); + this.directory = false; + this.absolutePath = true; + } + + public Path(List path, boolean absolutePath) { + + this.path = path; + this.directory = false; + this.absolutePath = absolutePath; + } + + public Path(String path) { + + // Factorize path into array of PathElement... + if (path.startsWith(PATH_SEPARATOR)) { + this.absolutePath = true; + } else { + this.absolutePath = false; + } + if (path.endsWith(PATH_SEPARATOR)) { + this.directory = true; + } else { + this.directory = false; + } + + String[] pathElements = factorizePath(path); + if (pathElements != null) { + // ...and build Path + this.path = Lists.newArrayList(); + for (String pathElement : pathElements) { + addPathElement(new PathElement(pathElement)); + } + } + } + + public String[] factorizePath(String path) { + + return toStringArray(path, PATH_SEPARATOR); + } + + public List getPathElements() { + + List result = Lists.newArrayList(); + Iterator scan = path.iterator(); + while (scan.hasNext()) { + PathElement p = scan.next(); + result.add(p.toString()); + } + return result; + } + + private String[] toStringArray(String value, String delim) { + + if (value != null) { + return split(delim, value); + } else { + return EMPTY_STRING_ARRAY; + } + } + + private String[] split(String seperators, String list) { + + return split(seperators, list, false); + } + + private String[] split(String seperators, String list, boolean include) { + + StringTokenizer tokens = new StringTokenizer(list, seperators, include); + String[] result = new String[tokens.countTokens()]; + int i = 0; + while (tokens.hasMoreTokens()) { + result[i++] = tokens.nextToken(); + } + return result; + } + + public String getPath() { + + StringBuilder buf = new 
StringBuilder(); + if (this.absolutePath) { + buf.append(PATH_SEPARATOR); + } + for (Iterator iter = path.iterator(); iter.hasNext();) { + PathElement item = iter.next(); + buf.append(item.getPathChunk()); + if (iter.hasNext()) { + buf.append(PATH_SEPARATOR); + } + } + if (this.directory) { + buf.append(PATH_SEPARATOR); + } + return buf.toString(); + } + + public int getLength() { + + if (path != null) { + return path.size(); + } else { + return 0; + } + } + + /** + * + * @param position int + * @return PathElement + */ + public PathElement getElementAt(int position) { + + if (position < getLength()) { + return this.path.get(position); + } else { + return null; + } + } + + /** + * + * @param obj Object + * @return boolean + */ + @Override + public boolean equals(Object obj) { + + boolean result = true; + if (!(obj instanceof Path)) { + result = false; + } else { + Path other = (Path) obj; + if (other.getLength() != this.getLength()) { + result = false; + } else { + int size = this.getLength(); + for (int i = 0; i < size; i++) { + if (!(this.getElementAt(i)).equals(other.getElementAt(i))) { + result = false; + break; + } + } + } + } + return result; + } + + /** + * + * @param pathChunk PathElement + */ + public void addPathElement(PathElement pathChunk) { + + this.path.add(pathChunk); + } + + /** + * + * @param elements int + * @return Path + */ + public Path getSubPath(int elements) { + + Path result = new Path(); + for (int i = 0; i < elements; i++) { + result.addPathElement(this.getElementAt(i)); + } + return result; + } + + /** + * + * @param wrapperCandidate Path + * @return boolean + */ + public boolean isEnclosed(Path wrapperCandidate) { + + boolean result = false; + if (this.getLength() > wrapperCandidate.getLength()) { + result = false; + } else { + Path other = wrapperCandidate.getSubPath(this.getLength()); + result = other.equals(this); + } + return result; + } + + /** + * + * @param other Path + * @return int + */ + public int distance(Path other) { + + 
int result = -1; + Path a; + Path b; + if (this.getLength() > other.getLength()) { + a = this; + b = other; + } else { + a = other; + b = this; + } + if (b.isEnclosed(a)) { + result = (a.getLength() - b.getLength()); + } else { + result = a.getLength() + b.getLength(); + } + return result; + } + + /** + * + * @return String + */ + @Override + public String toString() { + + StringBuilder buf = new StringBuilder(); + buf.append("["); + for (int i = 0; i < this.getLength(); i++) { + buf.append(" "); + buf.append(this.getElementAt(i).getPathChunk()); + } + buf.append(" ]"); + return buf.toString(); + } + } } diff --git a/src/main/java/it/grid/storm/namespace/naming/NamingConst.java b/src/main/java/it/grid/storm/namespace/naming/NamingConst.java index 0d502f08d..03d473ad5 100644 --- a/src/main/java/it/grid/storm/namespace/naming/NamingConst.java +++ b/src/main/java/it/grid/storm/namespace/naming/NamingConst.java @@ -17,6 +17,8 @@ package it.grid.storm.namespace.naming; +import java.net.UnknownHostException; + import it.grid.storm.config.Configuration; public class NamingConst { @@ -45,14 +47,14 @@ private NamingConst() { config = Configuration.getInstance(); } - public static String getServiceDefaultHost() { + public static String getServiceDefaultHost() throws UnknownHostException { - return instance.config.getServiceHostname(); + return instance.config.getSrmServiceHostname(); } public static int getServicePort() { - return instance.config.getServicePort(); + return instance.config.getSrmServicePort(); } public static String getServiceSFNQueryPrefix() { diff --git a/src/main/java/it/grid/storm/namespace/naming/SRMURL.java b/src/main/java/it/grid/storm/namespace/naming/SRMURL.java index c0cf56416..4e71add51 100644 --- a/src/main/java/it/grid/storm/namespace/naming/SRMURL.java +++ b/src/main/java/it/grid/storm/namespace/naming/SRMURL.java @@ -123,22 +123,6 @@ public String getSURLType() { return surlType.toString(); } - /** - * Returns true if the hostname of this 
srmurl is the one specified in - * configuration file field "storm.service.FE-public.hostname" - * - * @return - */ - public boolean isLocal() { - - if (local == -1) { - localSURL = getServiceHostname().equals( - NamingConst.getServiceDefaultHost()); - local = 1; - } - return localSURL; - } - public boolean isQueriedFormSURL() { if (surlType == null) { diff --git a/src/main/java/it/grid/storm/namespace/naming/SURL.java b/src/main/java/it/grid/storm/namespace/naming/SURL.java index 2c008748a..acae9165e 100644 --- a/src/main/java/it/grid/storm/namespace/naming/SURL.java +++ b/src/main/java/it/grid/storm/namespace/naming/SURL.java @@ -17,269 +17,237 @@ package it.grid.storm.namespace.naming; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.model.Protocol; - import java.net.URI; -import java.util.ArrayList; +import java.util.Set; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Sets; + +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.model.Protocol; -/** - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ public class SURL extends SRMURL { - private static Logger log = NamespaceDirector.getLogger(); - private static ArrayList schemes = new ArrayList(); - - static { - schemes.add("srm"); - } - - public final boolean directory; - - private SURL(final String hostName, final int port, - final String serviceEndpoint, final String queryString) { - - super(Protocol.SRM, hostName, port, serviceEndpoint, queryString); - directory = checkDirectory(queryString); - } - - private SURL(final String hostName, final int port, final String stfn) { - - super(Protocol.SRM, hostName, port, stfn); - directory = checkDirectory(stfn); - } - - // TODO MICHELE USER_SURL debug - public SURL(final String stfn) { - - super(Protocol.SRM, NamingConst.getServiceDefaultHost(), NamingConst - .getServicePort(), stfn); - directory = checkDirectory(stfn); - } - - /** - * Build SURL from the string format. Many control will be executed in the - * string format No other way to create a SURL, if u got a SURL for sure it's - * a valid URI normalized - * - * @param surlString - * String - * @return SURL - */ - public static SURL makeSURLfromString(String surlString) - throws NamespaceException { - - SURL result = null; - - // checks if is a valid uri and normalize - URI uri = null; - try { - uri = URI.create(surlString); - } catch (IllegalArgumentException uriEx) { - throw new NamespaceException("SURL_String :'" + surlString - + "' is INVALID. Reason: URI Except: " + uriEx.getMessage()); - } catch (NullPointerException npe) { - throw new NamespaceException("SURL_String :'" + surlString - + "' is INVALID. Reason: URI Except (null SURL): " + npe.getMessage()); - } - - // Check the scheme - // uri should be not null - String scheme = uri.getScheme(); - if (!(schemes.contains(scheme))) { - throw new NamespaceException("SURL_String :'" + surlString - + "' is INVALID. 
Reason: unknown scheme '" + scheme + "'"); - } - - // Check the query - String host = uri.getHost(); - if (host == null) { - throw new NamespaceException("SURL_String :'" + surlString - + "' is INVALID. Reason: malformed host!"); - } - int port = uri.getPort(); - String query = uri.getQuery(); - if (query == null || query.trim().equals("")) { - String stfn = uri.getPath(); - result = new SURL(host, port, stfn); - } else { - // The SURL_Str is in a Query FORM. - log.debug(" !! SURL ('" + surlString + "') in a query form (query:'" - + query + "') !!"); - String service = uri.getPath(); - log.debug(" Service endpoint : " + service); - if (checkQuery(query)) { - log.debug(" Query is in a valid form."); - // Extract the StFN from query: - String stfn = extractStFNfromQuery(query); - result = new SURL(host, port, service, stfn); - } else { - log.warn("SURL_String :'" + surlString - + "' is not VALID! (query is in invalid form)"); - throw new NamespaceException("SURL_String :'" + surlString - + "' is not VALID within the Query!"); - } - } - return result; - } - - public String getQueryFormAsString() { - if (this.isNormalFormSURL()) { - String uriString = transfProtocol.getProtocol().getSchema() + "://" - + this.transfProtocol.getAuthority().getServiceHostname(); - if (this.transfProtocol.getAuthority().getServicePort() >= 0) { - uriString += ":" + this.transfProtocol.getAuthority().getServicePort(); - } - uriString += "/srm/managerv2?SFN=" + this.path; - return uriString; - } - return this.getSURLAsURIString(); - } - - public String getNormalFormAsString() { - if (this.isQueriedFormSURL()) { - String uriString = transfProtocol.getProtocol().getSchema() + "://" - + this.transfProtocol.getAuthority().getServiceHostname(); - if (this.transfProtocol.getAuthority().getServicePort() >= 0) { - uriString += ":" + this.transfProtocol.getAuthority().getServicePort(); - } - uriString += this.getStFN(); - return uriString; - } - return this.getSURLAsURIString(); - } - - public 
boolean isDirectory() { - - return directory; - } - - private boolean checkDirectory(String path) { - - if (path != null && path.endsWith(NamingConst.SEPARATOR)) { - return true; - } else { - return false; - } - } - - /** - * - * Checks if the query string begins with the correct prefix ("SFN=") - * - * @param query - * @return - */ - private static boolean checkQuery(String query) { - - if (query == null) { - log.error("Received a null query to check!"); - return false; - } - return query.startsWith(NamingConst.getServiceSFNQueryPrefix() + "="); - } - - private static String extractStFNfromQuery(String query) { - - String stfn = ""; - if (query == null) { - return stfn; - } else { - int len = query.length(); - if (len < 4) { - return stfn; - } else { - stfn = query.substring(4); - } - } - return stfn; - } - - /** - * get the path and query string e.g. /path/service?SFN=pippo.txt if query - * form e.g /path/pippo.txt if simple form - * - * @return the path and its query string - */ - public String getPathQuery() { - - StringBuilder sb = new StringBuilder(250); - sb.append(getPath()); - if (this.isQueriedFormSURL()) { - sb.append("?"); - sb.append(NamingConst.getServiceSFNQueryPrefix()); - sb.append("="); - sb.append(getQueryString()); - } - return sb.toString(); - } - - public String getSURLAsURIString() { - - String uriString = transfProtocol.getProtocol().getSchema() + "://" - + this.transfProtocol.getAuthority().getServiceHostname(); - if (this.transfProtocol.getAuthority().getServicePort() >= 0) { - uriString += ":" + this.transfProtocol.getAuthority().getServicePort(); - } - if (this.isNormalFormSURL()) { - uriString += this.path; - } else { - uriString += this.getPathQuery(); - } - return uriString; - } - - @Override - public String toString() { - - StringBuilder buffer = new StringBuilder(); - buffer.append(this.transfProtocol.toString()); - buffer.append(this.getPathQuery()); - return buffer.toString(); - } - - @Override - public int hashCode() { - - int 
result = super.hashCode(); - result += 37 * schemes.hashCode() + 63 * (directory ? 1 : 0); - return result; - } - - /* - * - */ - @Override - public boolean equals(Object obj) { - - if (!super.equals(obj)) - return false; - if (!(obj instanceof SURL)) - return false; - SURL other = (SURL) obj; - if (directory != other.directory) - return false; - return true; - } + private static Logger log = LoggerFactory.getLogger(SURL.class); + private static Set schemes = Sets.newHashSet("srm"); + private static Set servicePaths = Sets.newHashSet("/srm/managerv2", "/srm/managerv1"); + + public final boolean directory; + + public SURL(final String hostName, final int port, final String serviceEndpoint, + final String queryString) { + + super(Protocol.SRM, hostName, port, serviceEndpoint, queryString); + directory = checkDirectory(queryString); + } + + public SURL(final String hostName, final int port, final String stfn) { + + super(Protocol.SRM, hostName, port, stfn); + directory = checkDirectory(stfn); + } + + /** + * Build SURL from the string format. Many control will be executed in the string format No other + * way to create a SURL, if u got a SURL for sure it's a valid URI normalized + * + * @param surlString String + * @return SURL + */ + public static SURL makeSURLfromString(String surlString) throws NamespaceException { + + SURL result = null; + String errorPrefix = "SURL_String :'" + surlString + "' is INVALID. 
Reason: "; + + // checks if is a valid URI and normalize + URI uri = null; + try { + uri = URI.create(surlString); + } catch (IllegalArgumentException uriEx) { + throw new NamespaceException(errorPrefix + "URI Except: " + uriEx.getMessage()); + } catch (NullPointerException npe) { + throw new NamespaceException(errorPrefix + "URI Except (null SURL): " + npe.getMessage()); + } + + // Check scheme + String scheme = uri.getScheme(); + if (!(schemes.contains(scheme))) { + throw new NamespaceException(errorPrefix + "Unknown scheme '" + scheme + "'"); + } + + // Check host + String host = uri.getHost(); + if (host == null) { + throw new NamespaceException(errorPrefix + "Malformed host"); + } + int port = uri.getPort(); + // Check if query form + String query = uri.getQuery(); + if (query == null || query.trim().equals("")) { + // The SURL is in a normal form + String stfn = uri.getPath(); + result = new SURL(host, port, stfn); + } else { + // The SURL is in a Query form + log.debug(" !! SURL ('" + surlString + "') in a query form (query:'" + query + "') !!"); + // Check web service path + String service = uri.getPath(); + log.debug(" Service endpoint : " + service); + if (!(servicePaths.contains(service))) { + throw new NamespaceException(errorPrefix + "Unknown web service path '" + service + "'"); + } + // Check query + if (checkQuery(query)) { + log.debug(" Query is in a valid form."); + // Extract the StFN from query: + String stfn = extractStFNfromQuery(query); + result = new SURL(host, port, service, stfn); + } else { + log.warn(errorPrefix + "Invalid query form"); + throw new NamespaceException(errorPrefix + "Invalid query form"); + } + } + return result; + } + + public String getQueryFormAsString() { + if (this.isNormalFormSURL()) { + String uriString = transfProtocol.getProtocol().getSchema() + "://" + + this.transfProtocol.getAuthority().getServiceHostname(); + if (this.transfProtocol.getAuthority().getServicePort() >= 0) { + uriString += ":" + 
this.transfProtocol.getAuthority().getServicePort(); + } + uriString += "/srm/managerv2?SFN=" + this.path; + return uriString; + } + return this.getSURLAsURIString(); + } + + public String getNormalFormAsString() { + if (this.isQueriedFormSURL()) { + String uriString = transfProtocol.getProtocol().getSchema() + "://" + + this.transfProtocol.getAuthority().getServiceHostname(); + if (this.transfProtocol.getAuthority().getServicePort() >= 0) { + uriString += ":" + this.transfProtocol.getAuthority().getServicePort(); + } + uriString += this.getStFN(); + return uriString; + } + return this.getSURLAsURIString(); + } + + public boolean isDirectory() { + + return directory; + } + + private boolean checkDirectory(String path) { + + if (path != null && path.endsWith(NamingConst.SEPARATOR)) { + return true; + } else { + return false; + } + } + + /** + * + * Checks if the query string begins with the correct prefix ("SFN=") + * + * @param query + * @return + */ + private static boolean checkQuery(String query) { + + if (query == null) { + log.error("Received a null query to check!"); + return false; + } + return query.startsWith(NamingConst.getServiceSFNQueryPrefix() + "="); + } + + private static String extractStFNfromQuery(String query) { + + String stfn = ""; + if (query == null) { + return stfn; + } else { + int len = query.length(); + if (len < 4) { + return stfn; + } else { + stfn = query.substring(4); + } + } + return stfn; + } + + /** + * get the path and query string e.g. 
/path/service?SFN=pippo.txt if query form e.g + * /path/pippo.txt if simple form + * + * @return the path and its query string + */ + public String getPathQuery() { + + StringBuilder sb = new StringBuilder(250); + sb.append(getPath()); + if (this.isQueriedFormSURL()) { + sb.append("?"); + sb.append(NamingConst.getServiceSFNQueryPrefix()); + sb.append("="); + sb.append(getQueryString()); + } + return sb.toString(); + } + + public String getSURLAsURIString() { + + String uriString = transfProtocol.getProtocol().getSchema() + "://" + + this.transfProtocol.getAuthority().getServiceHostname(); + if (this.transfProtocol.getAuthority().getServicePort() >= 0) { + uriString += ":" + this.transfProtocol.getAuthority().getServicePort(); + } + if (this.isNormalFormSURL()) { + uriString += this.path; + } else { + uriString += this.getPathQuery(); + } + return uriString; + } + + @Override + public String toString() { + + StringBuilder buffer = new StringBuilder(); + buffer.append(this.transfProtocol.toString()); + buffer.append(this.getPathQuery()); + return buffer.toString(); + } + + @Override + public int hashCode() { + + int result = super.hashCode(); + result += 37 * schemes.hashCode() + 63 * (directory ? 1 : 0); + return result; + } + + /* + * + */ + @Override + public boolean equals(Object obj) { + + if (!super.equals(obj)) + return false; + if (!(obj instanceof SURL)) + return false; + SURL other = (SURL) obj; + if (directory != other.directory) + return false; + return true; + } } diff --git a/src/main/java/it/grid/storm/namespace/remote/Constants.java b/src/main/java/it/grid/storm/namespace/remote/Constants.java deleted file mode 100644 index 2651ddb16..000000000 --- a/src/main/java/it/grid/storm/namespace/remote/Constants.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ -package it.grid.storm.namespace.remote; - -/** - * @author Michele Dibenedetto - */ -public class Constants { - - public static final String ENCODING_SCHEME = "UTF-8"; - public static final String RESOURCE = "configuration"; - public static final String VERSION_1_0 = "1.0"; - public static final String VERSION_1_1 = "1.1"; - public static final String VERSION_1_2 = "1.2"; - public static final String VERSION = "1.3"; - public static final String LIST_ALL_KEY = "StorageAreaList"; - public static final char VFS_LIST_SEPARATOR = ':'; - public static final String VFS_NAME_KEY = "name"; - public static final char VFS_FIELD_MATCHER = '='; - public static final char VFS_FIELD_SEPARATOR = '&'; - public static final String VFS_ROOT_KEY = "root"; - public static final String VFS_STFN_ROOT_KEY = "stfnRoot"; - public static final char VFS_STFN_ROOT_SEPARATOR = ';'; - public static final String VFS_ENABLED_PROTOCOLS_KEY = "protocols"; - public static final char VFS_ENABLED_PROTOCOLS_SEPARATOR = ';'; - public static final String VFS_ANONYMOUS_PERMS_KEY = "anonymous"; - public static final String LIST_ALL_VFS = "VirtualFSList"; - - public static enum HttpPerms { NOREAD, READ, READWRITE }; - -} diff --git a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_0.java b/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_0.java deleted file mode 100644 index b18da5365..000000000 --- a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_0.java +++ /dev/null @@ -1,80 +0,0 @@ -package 
it.grid.storm.namespace.remote.resource; - -import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; - -import java.util.List; - -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.Response; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.VirtualFSInterface; -import it.grid.storm.namespace.model.MappingRule; -import it.grid.storm.namespace.remote.Constants; - -/** - * @author Michele Dibenedetto - */ -@Path("/" + Constants.RESOURCE + "/" + Constants.VERSION_1_0) -public class VirtualFSResourceCompat_1_0 { - - private static final Logger log = LoggerFactory.getLogger(VirtualFSResourceCompat_1_0.class); - - /** - * @return - */ - @GET - @Path("/" + Constants.LIST_ALL_KEY) - @Produces("text/plain") - public String listVFS() { - - log.info("Serving VFS resource listing"); - String vfsListString = ""; - List vfsCollection = NamespaceDirector.getNamespace().getAllDefinedVFS(); - for (VirtualFSInterface vfs : vfsCollection) { - if (!vfsListString.equals("")) { - vfsListString += Constants.VFS_LIST_SEPARATOR; - } - try { - vfsListString += encodeVFS(vfs); - } catch (NamespaceException e) { - log.error("Unable to encode the virtual file system. 
NamespaceException : {}", - e.getMessage()); - throw new WebApplicationException(Response.status(INTERNAL_SERVER_ERROR) - .entity("Unable to encode the virtual file system") - .build()); - } - } - return vfsListString; - } - - /** - * @param vfs - * @return - * @throws NamespaceException - */ - private String encodeVFS(VirtualFSInterface vfs) throws NamespaceException { - - String vfsEncoded = Constants.VFS_NAME_KEY + Constants.VFS_FIELD_MATCHER + vfs.getAliasName(); - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - vfsEncoded += Constants.VFS_ROOT_KEY + Constants.VFS_FIELD_MATCHER + vfs.getRootPath(); - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - List mappingRules = vfs.getMappingRules(); - vfsEncoded += Constants.VFS_STFN_ROOT_KEY + Constants.VFS_FIELD_MATCHER; - for (int i = 0; i < mappingRules.size(); i++) { - MappingRule mappingRule = mappingRules.get(i); - if (i > 0) { - vfsEncoded += Constants.VFS_STFN_ROOT_SEPARATOR; - } - vfsEncoded += mappingRule.getStFNRoot(); - } - return vfsEncoded; - } -} diff --git a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_1.java b/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_1.java deleted file mode 100644 index 7d539251e..000000000 --- a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_1.java +++ /dev/null @@ -1,95 +0,0 @@ -package it.grid.storm.namespace.remote.resource; - -import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; - -import java.util.Iterator; -import java.util.List; - -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.Response; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.VirtualFSInterface; -import it.grid.storm.namespace.model.MappingRule; 
-import it.grid.storm.namespace.model.Protocol; -import it.grid.storm.namespace.remote.Constants; - -/** - * @author Michele Dibenedetto - */ -@Path("/" + Constants.RESOURCE + "/" + Constants.VERSION_1_1) -public class VirtualFSResourceCompat_1_1 { - - private static final Logger log = LoggerFactory.getLogger(VirtualFSResourceCompat_1_1.class); - - /** - * @return - */ - @GET - @Path("/" + Constants.LIST_ALL_KEY) - @Produces("text/plain") - public String listVFS() { - - log.info("Serving VFS resource listing"); - String vfsListString = ""; - List vfsCollection = NamespaceDirector.getNamespace().getAllDefinedVFS(); - for (VirtualFSInterface vfs : vfsCollection) { - if (!vfsListString.equals("")) { - vfsListString += Constants.VFS_LIST_SEPARATOR; - } - try { - vfsListString += encodeVFS(vfs); - } catch (NamespaceException e) { - log.error("Unable to encode the virtual file system. NamespaceException : {}", - e.getMessage()); - throw new WebApplicationException(Response.status(INTERNAL_SERVER_ERROR) - .entity("Unable to encode the virtual file system") - .build()); - } - } - return vfsListString; - } - - /** - * @param vfs - * @return - * @throws NamespaceException - */ - private String encodeVFS(VirtualFSInterface vfs) throws NamespaceException { - - String vfsEncoded = Constants.VFS_NAME_KEY + Constants.VFS_FIELD_MATCHER + vfs.getAliasName(); - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - vfsEncoded += Constants.VFS_ROOT_KEY + Constants.VFS_FIELD_MATCHER + vfs.getRootPath(); - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - List mappingRules = vfs.getMappingRules(); - vfsEncoded += Constants.VFS_STFN_ROOT_KEY + Constants.VFS_FIELD_MATCHER; - for (int i = 0; i < mappingRules.size(); i++) { - MappingRule mappingRule = mappingRules.get(i); - if (i > 0) { - vfsEncoded += Constants.VFS_STFN_ROOT_SEPARATOR; - } - vfsEncoded += mappingRule.getStFNRoot(); - } - Iterator protocolsIterator = - vfs.getCapabilities().getAllManagedProtocols().iterator(); - if 
(protocolsIterator.hasNext()) { - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - vfsEncoded += Constants.VFS_ENABLED_PROTOCOLS_KEY; - vfsEncoded += Constants.VFS_FIELD_MATCHER; - } - while (protocolsIterator.hasNext()) { - vfsEncoded += protocolsIterator.next().getSchema(); - if (protocolsIterator.hasNext()) { - vfsEncoded += Constants.VFS_ENABLED_PROTOCOLS_SEPARATOR; - } - } - return vfsEncoded; - } -} diff --git a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_2.java b/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_2.java deleted file mode 100644 index f65a927f7..000000000 --- a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_2.java +++ /dev/null @@ -1,110 +0,0 @@ -package it.grid.storm.namespace.remote.resource; - -import static it.grid.storm.namespace.remote.Constants.VFS_LIST_SEPARATOR; -import static java.lang.String.join; -import static java.lang.String.valueOf; -import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; - -import java.util.Iterator; -import java.util.List; - -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.Response; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.Lists; - -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.VirtualFSInterface; -import it.grid.storm.namespace.model.MappingRule; -import it.grid.storm.namespace.model.Protocol; -import it.grid.storm.namespace.remote.Constants; -import it.grid.storm.namespace.remote.Constants.HttpPerms; - -/** - * @author Michele Dibenedetto - */ -@Path("/" + Constants.RESOURCE + "/" + Constants.VERSION_1_2) -public class VirtualFSResourceCompat_1_2 { - - private static final Logger log = LoggerFactory.getLogger(VirtualFSResourceCompat_1_2.class); - - /** - * 
@return - */ - @GET - @Path("/" + Constants.LIST_ALL_KEY) - @Produces("text/plain") - public String listVFS() { - - log.info("Serving VFS resource listing"); - List vfsCollection = NamespaceDirector.getNamespace().getAllDefinedVFS(); - List encodedVFSs = Lists.newArrayList(); - vfsCollection.forEach(vfs -> { - try { - encodedVFSs.add(encodeVFS(vfs)); - } catch (NamespaceException e) { - log.error( - "Unable to encode the virtual file system. NamespaceException : {}", e.getMessage()); - throw new WebApplicationException(Response.status(INTERNAL_SERVER_ERROR) - .entity("Unable to encode the virtual file system") - .build()); - } - }); - return join(valueOf(VFS_LIST_SEPARATOR), encodedVFSs); - } - - /** - * @param vfs - * @return - * @throws NamespaceException - */ - private String encodeVFS(VirtualFSInterface vfs) throws NamespaceException { - - String vfsEncoded = Constants.VFS_NAME_KEY + Constants.VFS_FIELD_MATCHER + vfs.getAliasName(); - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - vfsEncoded += Constants.VFS_ROOT_KEY + Constants.VFS_FIELD_MATCHER + vfs.getRootPath(); - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - List mappingRules = vfs.getMappingRules(); - vfsEncoded += Constants.VFS_STFN_ROOT_KEY + Constants.VFS_FIELD_MATCHER; - for (int i = 0; i < mappingRules.size(); i++) { - MappingRule mappingRule = mappingRules.get(i); - if (i > 0) { - vfsEncoded += Constants.VFS_STFN_ROOT_SEPARATOR; - } - vfsEncoded += mappingRule.getStFNRoot(); - } - Iterator protocolsIterator = - vfs.getCapabilities().getAllManagedProtocols().iterator(); - if (protocolsIterator.hasNext()) { - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - vfsEncoded += Constants.VFS_ENABLED_PROTOCOLS_KEY; - vfsEncoded += Constants.VFS_FIELD_MATCHER; - } - while (protocolsIterator.hasNext()) { - vfsEncoded += protocolsIterator.next().getSchema(); - if (protocolsIterator.hasNext()) { - vfsEncoded += Constants.VFS_ENABLED_PROTOCOLS_SEPARATOR; - } - } - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - 
vfsEncoded += Constants.VFS_ANONYMOUS_PERMS_KEY; - vfsEncoded += Constants.VFS_FIELD_MATCHER; - if (vfs.isHttpWorldReadable()) { - if (vfs.isApproachableByAnonymous()) { - vfsEncoded += HttpPerms.READWRITE; - } else { - vfsEncoded += HttpPerms.READ; - } - } else { - vfsEncoded += HttpPerms.NOREAD; - } - return vfsEncoded; - } -} diff --git a/src/main/java/it/grid/storm/namespace/util/userinfo/UserInfoException.java b/src/main/java/it/grid/storm/namespace/util/userinfo/UserInfoException.java index 5a389a950..04d47082f 100644 --- a/src/main/java/it/grid/storm/namespace/util/userinfo/UserInfoException.java +++ b/src/main/java/it/grid/storm/namespace/util/userinfo/UserInfoException.java @@ -19,23 +19,28 @@ public class UserInfoException extends RuntimeException { - public UserInfoException() { + /** + * + */ + private static final long serialVersionUID = 1L; - super(); - } + public UserInfoException() { - public UserInfoException(String message) { + super(); + } - super(message); - } + public UserInfoException(String message) { - public UserInfoException(String message, Throwable cause) { + super(message); + } - super(message, cause); - } + public UserInfoException(String message, Throwable cause) { - public UserInfoException(Throwable cause) { + super(message, cause); + } - super(cause); - } + public UserInfoException(Throwable cause) { + + super(cause); + } } diff --git a/src/main/java/it/grid/storm/persistence/DAOFactory.java b/src/main/java/it/grid/storm/persistence/DAOFactory.java deleted file mode 100644 index 31c64c4a9..000000000 --- a/src/main/java/it/grid/storm/persistence/DAOFactory.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/* - * (c)2004 INFN / ICTP-eGrid This file can be distributed and/or modified under - * the terms of the INFN Software License. For a copy of the licence please - * visit http://www.cnaf.infn.it/license.html - */ - -package it.grid.storm.persistence; - -import it.grid.storm.persistence.dao.PtGChunkDAO; -import it.grid.storm.persistence.dao.PtPChunkDAO; -import it.grid.storm.persistence.dao.RequestSummaryDAO; -import it.grid.storm.persistence.dao.StorageAreaDAO; -import it.grid.storm.persistence.dao.StorageSpaceDAO; -import it.grid.storm.persistence.dao.TapeRecallDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; - -/** - * Returns an implementation of all Catalog interfaces. - * - * @author Riccardo Zappi - riccardo.zappi AT cnaf.infn.it - * @version $Id: DAOFactory.java,v 1.3 2005/10/22 15:09:40 rzappi Exp $ - */ -public interface DAOFactory { - - /** - * Returns an implementation of StorageSpaceCatalog, specific to a particular - * datastore. 
- * - * @throws DataAccessException - * @return StorageSpaceDAO - */ - public StorageSpaceDAO getStorageSpaceDAO() throws DataAccessException; - - public TapeRecallDAO getTapeRecallDAO(); - - public TapeRecallDAO getTapeRecallDAO(boolean test) - throws DataAccessException; - - public PtGChunkDAO getPtGChunkDAO() throws DataAccessException; - - public PtPChunkDAO getPtPChunkDAO() throws DataAccessException; - - public StorageAreaDAO getStorageAreaDAO() throws DataAccessException; - - public RequestSummaryDAO getRequestSummaryDAO() throws DataAccessException; - -} diff --git a/src/main/java/it/grid/storm/persistence/DataSourceConnectionFactory.java b/src/main/java/it/grid/storm/persistence/DataSourceConnectionFactory.java deleted file mode 100644 index 76ba4cfa9..000000000 --- a/src/main/java/it/grid/storm/persistence/DataSourceConnectionFactory.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence; - -import java.sql.Connection; -import it.grid.storm.persistence.exceptions.PersistenceException; - -public interface DataSourceConnectionFactory { - - public Connection borrowConnection() throws PersistenceException; - - public void giveBackConnection(Connection con) throws PersistenceException; - -} diff --git a/src/main/java/it/grid/storm/persistence/MySqlDAOFactory.java b/src/main/java/it/grid/storm/persistence/MySqlDAOFactory.java deleted file mode 100644 index bf3de4ff9..000000000 --- a/src/main/java/it/grid/storm/persistence/MySqlDAOFactory.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import it.grid.storm.persistence.dao.PtGChunkDAO; -import it.grid.storm.persistence.dao.PtPChunkDAO; -import it.grid.storm.persistence.dao.RequestSummaryDAO; -import it.grid.storm.persistence.dao.StorageAreaDAO; -import it.grid.storm.persistence.dao.StorageSpaceDAO; -import it.grid.storm.persistence.dao.TapeRecallDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.impl.mysql.StorageSpaceDAOMySql; -import it.grid.storm.persistence.impl.mysql.TapeRecallDAOMySql; - -public class MySqlDAOFactory implements DAOFactory { - - public static final String factoryName = "JDBC - MySQL DAO Factory"; - - private static final Logger log = LoggerFactory - .getLogger(MySqlDAOFactory.class); - - private static MySqlDAOFactory factory = new MySqlDAOFactory(); - - /** - * - */ - private MySqlDAOFactory() { - log.info("DAO factory: {}", MySqlDAOFactory.factoryName); - } - - public static MySqlDAOFactory getInstance() { - - return MySqlDAOFactory.factory; - } - - /** - * Returns an implementation of StorageSpaceCatalog, specific to a particular - * datastore. - * - * @throws DataAccessException - * @return StorageSpaceDAO - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public StorageSpaceDAO getStorageSpaceDAO() throws DataAccessException { - - return new StorageSpaceDAOMySql(); - } - - /** - * Returns an implementation of TapeRecallCatalog, specific to a particular - * datastore. 
- * - * @throws DataAccessException - * @return TapeReallDAO - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public TapeRecallDAO getTapeRecallDAO() { - - return new TapeRecallDAOMySql(); - } - - /** - * @return String - */ - @Override - public String toString() { - - return MySqlDAOFactory.factoryName; - } - - - /** - * getPtGChunkDAO - * - * @return PtGChunkDAO - * @throws DataAccessException - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public PtGChunkDAO getPtGChunkDAO() throws DataAccessException { - - return null; - } - - /** - * getPtPChunkDAO - * - * @return PtPChunkDAO - * @throws DataAccessException - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public PtPChunkDAO getPtPChunkDAO() throws DataAccessException { - - return null; - } - - /** - * getRequestSummaryDAO - * - * @return RequestSummaryDAO - * @throws DataAccessException - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public RequestSummaryDAO getRequestSummaryDAO() throws DataAccessException { - - return null; - } - - /** - * getStorageAreaDAO - * - * @return StorageAreaDAO - * @throws DataAccessException - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public StorageAreaDAO getStorageAreaDAO() throws DataAccessException { - - return null; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.persistence.DAOFactory#getTapeRecallDAO(boolean) - */ - public TapeRecallDAO getTapeRecallDAO(boolean test) - throws DataAccessException { - - return new TapeRecallDAOMySql(); - } - -} diff --git a/src/main/java/it/grid/storm/persistence/PersistenceDirector.java b/src/main/java/it/grid/storm/persistence/PersistenceDirector.java deleted file mode 100644 index 855904ac8..000000000 --- a/src/main/java/it/grid/storm/persistence/PersistenceDirector.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence; - -import it.grid.storm.config.Configuration; -import it.grid.storm.persistence.exceptions.PersistenceException; -import it.grid.storm.persistence.util.db.DBConnectionPool; -import it.grid.storm.persistence.util.db.DataBaseStrategy; -import it.grid.storm.persistence.util.db.Databases; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class PersistenceDirector { - - private static final Logger log = LoggerFactory.getLogger("persistence"); - private static Configuration config; - private static DataBaseStrategy dbMan; - private static DAOFactory daoFactory; - private static DataSourceConnectionFactory connFactory; - - static { - log.trace("Initializing Persistence Director..."); - config = Configuration.getInstance(); - dbMan = Databases.getDataBaseStrategy("mysql"); - daoFactory = MySqlDAOFactory.getInstance(); - - int maxActive = config.getBEPersistencePoolDBMaxActive(); - int maxWait = config.getBEPersistencePoolDBMaxWait(); - - log.debug("Datasource connection string = {}", dbMan.getConnectionString()); - log.debug("Pool Max Active = {}", maxActive); - log.debug("Pool Max Wait = {}", maxWait); - - try { - DBConnectionPool.initPool(dbMan, maxActive, maxWait); - connFactory = DBConnectionPool.getPoolInstance(); - } catch (PersistenceException e) { - log.error(e.getMessage(), e); - System.exit(1); - } - } - - public static DAOFactory 
getDAOFactory() { - - return daoFactory; - } - - public static DataBaseStrategy getDataBase() { - - return dbMan; - } - - public static DataSourceConnectionFactory getConnectionFactory() { - - return connFactory; - } - - public static Logger getLogger() { - - return log; - } - -} diff --git a/src/main/java/it/grid/storm/persistence/converter/DirOptionConverter.java b/src/main/java/it/grid/storm/persistence/converter/DirOptionConverter.java new file mode 100644 index 000000000..c0ae08ef8 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/DirOptionConverter.java @@ -0,0 +1,70 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +/** + * Package private class that translates between DPM flag for TDirOption and StoRM TDirOption + * proper. + * + * In particular DPM uses the int 1 to denote a recursive call, yet it fails to distinguish between + * a chosen recursion level; in other words there is no way that DPM specifies the number of levels + * to recurse: so either you recurse till the end or nothing. 
+ * + * @author EGRID - ICTP Trieste + * @version 1.0 + * @date August, 2005 + */ +class DirOptionConverter { + + static private DirOptionConverter converter; + + private DirOptionConverter() { + + } + + static public DirOptionConverter getInstance() { + + if (converter == null) + converter = new DirOptionConverter(); + return converter; + } + + /** + * Method that translates the int used by DPM as flag for TDirOption, into a boolean for + * isDirOption. + * + * 1 causes true to be returned; any other value returns 0. + */ + public boolean toSTORM(int n) { + + return (n == 1); + } + + /** + * Method used to translate the boolean isDirOption into an int used by DPM to express the same + * thing. + * + * true gets translated into 1; false into 0. + */ + public int toDPM(boolean isDirOption) { + + if (isDirOption) + return 1; + return 0; + } +} diff --git a/src/main/java/it/grid/storm/persistence/converter/FileLifetimeConverter.java b/src/main/java/it/grid/storm/persistence/converter/FileLifetimeConverter.java new file mode 100644 index 000000000..664a7cc4e --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/FileLifetimeConverter.java @@ -0,0 +1,72 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.config.Configuration; + +/** + * Class that handles DB representation of a pinLifetime as expressed by a TLifetimeInSeconds + * objects; in particular it takes care of protocol specification: + * + * 0/null/negative are translated as default StoRM configurable values. StoRMs Empty + * TLifeTimeInSeconds is translated as 0. + * + * @author EGRID ICTP + * @version 1.0 + * @date March 2007 + */ +public class FileLifetimeConverter { + + private static FileLifetimeConverter stc = new FileLifetimeConverter(); + + private FileLifetimeConverter() { + + } + + /** + * Method that returns the only instance of SizeInBytesIntConverter + */ + public static FileLifetimeConverter getInstance() { + + return stc; + } + + /** + * Method that translates the Empty TLifeTimeInSeconds into the empty representation of DB which + * is 0. Any other value is left as is. + */ + public int toDB(long l) { + + if (l == TLifeTimeInSeconds.makeEmpty().value()) + return 0; + return Long.valueOf(l).intValue(); + } + + /** + * Method that returns the long corresponding to the int value in the DB, except if it is 0, NULL + * or negative; a configurable default value is returned instead, corresponding to the + * getFileLifetimeDefault() Configuration class method. + */ + public long toStoRM(int s) { + + if (s <= 0) + return Configuration.getInstance().getFileLifetimeDefault(); + return Integer.valueOf(s).longValue(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/converter/FileStorageTypeConverter.java b/src/main/java/it/grid/storm/persistence/converter/FileStorageTypeConverter.java new file mode 100644 index 000000000..a9dca4b46 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/FileStorageTypeConverter.java @@ -0,0 +1,102 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +import java.util.Map; +import java.util.HashMap; +import java.util.Iterator; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.config.Configuration; + +/** + * Package private auxiliary class used to convert between DB raw data and StoRM object model + * representation of TFileStorageType. + * + * @author: EGRID ICTP + * @version: 2.0 + * @date: June 2005 + */ +public class FileStorageTypeConverter { + + private Map DBtoSTORM = new HashMap(); + private Map STORMtoDB = new HashMap(); + + private static FileStorageTypeConverter c = new FileStorageTypeConverter(); + + /** + * Private constructor that fills in the conversion tables; + * + * V - VOLATILE P - PERMANENT D - DURABLE + */ + private FileStorageTypeConverter() { + + DBtoSTORM.put("V", TFileStorageType.VOLATILE); + DBtoSTORM.put("P", TFileStorageType.PERMANENT); + DBtoSTORM.put("D", TFileStorageType.DURABLE); + String aux; + for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) { + aux = i.next(); + STORMtoDB.put(DBtoSTORM.get(aux), aux); + } + } + + /** + * Method that returns the only instance of FileStorageTypeConverter. + */ + public static FileStorageTypeConverter getInstance() { + + return c; + } + + /** + * Method that returns the String used in the DB to represent the given TFileStorageType. The + * empty String "" is returned if no match is found. 
+ */ + public String toDB(TFileStorageType fst) { + + String aux = (String) STORMtoDB.get(fst); + if (aux == null) + return ""; + return aux; + } + + /** + * Method that returns the TFileStorageType used by StoRM to represent the supplied String + * representation in the DB. A configured default TFileStorageType is returned in case no + * corresponding StoRM type is found. TFileStorageType.EMPTY is returned if there are + * configuration errors. + */ + public TFileStorageType toSTORM(String s) { + + TFileStorageType aux = DBtoSTORM.get(s); + if (aux == null) + // This case is that the String s is different from V,P or D. + aux = DBtoSTORM.get(Configuration.getInstance().getDefaultFileStorageType().name()); + if (aux == null) + // This should never happen: the configured default could not be resolved, so EMPTY is returned to signal a configuration error. + return TFileStorageType.EMPTY; + else + return aux; + } + + public String toString() { + + return "FileStorageTypeConverter.\nDBtoSTORM map:" + DBtoSTORM + "\nSTORMtoDB map:" + STORMtoDB; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/converter/OverwriteModeConverter.java b/src/main/java/it/grid/storm/persistence/converter/OverwriteModeConverter.java new file mode 100644 index 000000000..bc7fd8339 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/OverwriteModeConverter.java @@ -0,0 +1,78 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License.
+ */ + +package it.grid.storm.persistence.converter; + +import static it.grid.storm.srm.types.TOverwriteMode.ALWAYS; +import static it.grid.storm.srm.types.TOverwriteMode.NEVER; +import static it.grid.storm.srm.types.TOverwriteMode.WHENFILESAREDIFFERENT; + +import java.util.Map; + +import com.google.common.collect.Maps; + +import it.grid.storm.config.model.v2.OverwriteMode; +import it.grid.storm.srm.types.TOverwriteMode; + +/** + * Package private auxiliary class used to convert between DB and StoRM object model representation + * of TOverwriteMode. + * + * @author: EGRID ICTP + * @version: 2.0 + * @date: June 2005 + */ +public class OverwriteModeConverter { + + private static Map STORMtoDB = Maps.newHashMap(); + + static { + + STORMtoDB.put(NEVER, OverwriteMode.N); + STORMtoDB.put(ALWAYS, OverwriteMode.A); + STORMtoDB.put(WHENFILESAREDIFFERENT, OverwriteMode.D); + } + + public static OverwriteMode toDB(TOverwriteMode om) { + + if (STORMtoDB.containsKey(om)) { + return STORMtoDB.get(om); + } + return OverwriteMode.N; + } + + public static TOverwriteMode toSTORM(String s) { + + OverwriteMode om = OverwriteMode.valueOf(s.trim().toUpperCase()); + return toSTORM(om); + } + + public static TOverwriteMode toSTORM(OverwriteMode om) { + + switch (om) { + case N: + return TOverwriteMode.NEVER; + case A: + return TOverwriteMode.ALWAYS; + case D: + return TOverwriteMode.WHENFILESAREDIFFERENT; + default: + return TOverwriteMode.EMPTY; + } + } + +} diff --git a/src/main/java/it/grid/storm/persistence/converter/PinLifetimeConverter.java b/src/main/java/it/grid/storm/persistence/converter/PinLifetimeConverter.java new file mode 100644 index 000000000..cfc01e04e --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/PinLifetimeConverter.java @@ -0,0 +1,87 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.config.Configuration; + +/** + * Class that handles DB representation of a TLifeTimeInSeconds, in particular it takes care of + * protocol specification: + * + * 0/null/negative are translated as default StoRM configurable values. StoRMs Empty + * TLifeTimeInSeconds is translated as 0. + * + * @author EGRID ICTP + * @version 1.0 + * @date March 2007 + */ +public class PinLifetimeConverter { + + private static PinLifetimeConverter stc = new PinLifetimeConverter(); + + private PinLifetimeConverter() { + + } + + /** + * Method that returns the only instance of PinLifetimeConverter + */ + public static PinLifetimeConverter getInstance() { + + return stc; + } + + /** + * Method that translates the Empty TLifeTimeInSeconds into the empty representation of DB which + * is 0. Any other value is left as is. + */ + public int toDB(long l) { + + if (l == TLifeTimeInSeconds.makeEmpty().value()) + return 0; + return Long.valueOf(l).intValue(); + } + + /** + * Method that returns the long corresponding to the int value in the DB, except if it is 0, NULL + * or negative; a configurable default value is returned instead, corresponding to the + * getPinLifetimeDefault() Configuration class method.
+ */ + public long toStoRM(int s) { + + if (s == 0) { + return Configuration.getInstance().getPinLifetimeDefault(); + } else if (s < 0) { + // The default is used also as a Minimum + return Configuration.getInstance().getPinLifetimeDefault(); + } + return Integer.valueOf(s).longValue(); + } + + public long toStoRM(long s) { + + if (s == 0) { + return Configuration.getInstance().getPinLifetimeDefault(); + } else if (s < 0) { + // The default is used also as a Minimum + return Configuration.getInstance().getPinLifetimeDefault(); + } + return s; + } +} diff --git a/src/main/java/it/grid/storm/catalogs/RequestTypeConverter.java b/src/main/java/it/grid/storm/persistence/converter/RequestTypeConverter.java similarity index 85% rename from src/main/java/it/grid/storm/catalogs/RequestTypeConverter.java rename to src/main/java/it/grid/storm/persistence/converter/RequestTypeConverter.java index 2396ef2f1..e92af6d63 100644 --- a/src/main/java/it/grid/storm/catalogs/RequestTypeConverter.java +++ b/src/main/java/it/grid/storm/persistence/converter/RequestTypeConverter.java @@ -15,12 +15,12 @@ * the License. 
*/ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.converter; -import static it.grid.storm.catalogs.RequestSummaryDataTO.BOL_REQUEST_TYPE; -import static it.grid.storm.catalogs.RequestSummaryDataTO.COPY_REQUEST_TYPE; -import static it.grid.storm.catalogs.RequestSummaryDataTO.PTG_REQUEST_TYPE; -import static it.grid.storm.catalogs.RequestSummaryDataTO.PTP_REQUEST_TYPE; +import static it.grid.storm.persistence.model.RequestSummaryDataTO.BOL_REQUEST_TYPE; +import static it.grid.storm.persistence.model.RequestSummaryDataTO.COPY_REQUEST_TYPE; +import static it.grid.storm.persistence.model.RequestSummaryDataTO.PTG_REQUEST_TYPE; +import static it.grid.storm.persistence.model.RequestSummaryDataTO.PTP_REQUEST_TYPE; import static it.grid.storm.srm.types.TRequestType.BRING_ON_LINE; import static it.grid.storm.srm.types.TRequestType.COPY; import static it.grid.storm.srm.types.TRequestType.EMPTY; @@ -37,7 +37,7 @@ * Package private auxiliary class used to convert between DB and StoRM object model representation * of the request type. */ -class RequestTypeConverter { +public class RequestTypeConverter { private Map dbToStorm = Maps.newHashMap(); private Map stormToDb = Maps.newHashMap(); diff --git a/src/main/java/it/grid/storm/persistence/converter/SizeInBytesIntConverter.java b/src/main/java/it/grid/storm/persistence/converter/SizeInBytesIntConverter.java new file mode 100644 index 000000000..090c397cd --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/SizeInBytesIntConverter.java @@ -0,0 +1,69 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
 You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TSizeInBytes; + +/** + * Class that handles DB representation of a TSizeInBytes, in particular it takes care of the NULL + * logic of the DB: 0/null are used to mean an empty field, whereas StoRM Object model uses the type + * TSizeInBytes.makeEmpty(); moreover StoRM does accept 0 as a valid TSizeInBytes, so it _is_ + * important to use this converter! + * + * @author EGRID ICTP + * @version 2.0 + * @date July 2005 + */ +public class SizeInBytesIntConverter { + + private static SizeInBytesIntConverter stc = new SizeInBytesIntConverter(); + + private SizeInBytesIntConverter() { + + } + + /** + * Method that returns the only instance of SizeInBytesIntConverter + */ + public static SizeInBytesIntConverter getInstance() { + + return stc; + } + + /** + * Method that translates the Empty TSizeInBytes into the empty representation of DB which is 0. + * Any other int is left as is. + */ + public long toDB(long s) { + + if (s == TSizeInBytes.makeEmpty().value()) + return 0; + return s; + } + + /** + * Method that returns the int as is, except if it is 0 which DB interprets as empty field: in + * that case it then returns the Empty TSizeInBytes int representation.
+ */ + public long toStoRM(long s) { + + if (s == 0) + return TSizeInBytes.makeEmpty().value(); + return s; + } +} diff --git a/src/main/java/it/grid/storm/persistence/converter/SpaceTokenStringConverter.java b/src/main/java/it/grid/storm/persistence/converter/SpaceTokenStringConverter.java new file mode 100644 index 000000000..be36a9a29 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/SpaceTokenStringConverter.java @@ -0,0 +1,69 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TSpaceToken; + +/** + * Class that handles DPM DB representation of a SpaceToken, in particular it takes care of the + * NULL/EMPTY logic of DPM. In particular DPM uses the empty string "" as meaning the absence of a + * value for the field, wheras StoRM accepts it as a valis String with which to create a + * TSpaceToken; moreover StoRM uses an Empty TSpaceToken type. 
+ * + * @author EGRID ICTP + * @version 1.0 + * @date June 2005 + */ +public class SpaceTokenStringConverter { + + private static SpaceTokenStringConverter stc = new SpaceTokenStringConverter(); + + private SpaceTokenStringConverter() { + + } + + /** + * Method that returns the only instance of SpaceTokenStringConverter + */ + public static SpaceTokenStringConverter getInstance() { + + return stc; + } + + /** + * Method that translates StoRM Empty TSpaceToken String representation into DPM empty + * representation; all other Strings are left as are. + */ + public String toDB(String s) { + + if (s.equals(TSpaceToken.makeEmpty().toString())) + return ""; + return s; + } + + /** + * Method that translates DPM String representing an Empty TSpaceToken into StoRM representation; + * any other String is left as is. + */ + public String toStoRM(String s) { + + if ((s == null) || (s.equals(""))) + return TSpaceToken.makeEmpty().toString(); + return s; + } +} diff --git a/src/main/java/it/grid/storm/catalogs/StatusCodeConverter.java b/src/main/java/it/grid/storm/persistence/converter/StatusCodeConverter.java similarity index 96% rename from src/main/java/it/grid/storm/catalogs/StatusCodeConverter.java rename to src/main/java/it/grid/storm/persistence/converter/StatusCodeConverter.java index 922b91741..beee7c3a8 100644 --- a/src/main/java/it/grid/storm/catalogs/StatusCodeConverter.java +++ b/src/main/java/it/grid/storm/persistence/converter/StatusCodeConverter.java @@ -15,7 +15,9 @@ * the License.
*/ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.converter; + +import java.util.Map; import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; import static it.grid.storm.srm.types.TStatusCode.SRM_AUTHENTICATION_FAILURE; @@ -52,11 +54,8 @@ import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; import static it.grid.storm.srm.types.TStatusCode.SRM_TOO_MANY_RESULTS; +import java.util.HashMap; import java.util.Iterator; -import java.util.Map; - -import com.google.common.collect.Maps; - import it.grid.storm.srm.types.TStatusCode; /** @@ -69,8 +68,8 @@ */ public class StatusCodeConverter { - private Map DBtoSTORM = Maps.newHashMap(); - private Map STORMtoDB = Maps.newHashMap(); + private Map DBtoSTORM = new HashMap(); + private Map STORMtoDB = new HashMap(); private static StatusCodeConverter c = new StatusCodeConverter(); diff --git a/src/main/java/it/grid/storm/persistence/converter/TURLConverter.java b/src/main/java/it/grid/storm/persistence/converter/TURLConverter.java new file mode 100644 index 000000000..887072d87 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/TURLConverter.java @@ -0,0 +1,68 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TTURL; + +/** + * Class that handles DPM DB representation of a TTURL, in particular it takes care of the + * NULL/EMPTY logic of DPM. Indeed DPM uses 0/null to mean an empty field, whereas StoRM uses the + * type TTURL.makeEmpty(); in particular StoRM converts an empty String or a null to an Empty TTURL! + * + * @author EGRID ICTP + * @version 1.0 + * @date March 2006 + */ +public class TURLConverter { + + private static TURLConverter stc = new TURLConverter(); // only instance + + private TURLConverter() { + + } + + /** + * Method that returns the only instance of TURLConverter + */ + public static TURLConverter getInstance() { + + return stc; + } + + /** + * Method that translates the Empty TTURL into the empty representation of DPM which is a null! + * Any other String is left as is. + */ + public String toDB(String s) { + + if (s.equals(TTURL.makeEmpty().toString())) + return null; + return s; + } + + /** + * Method that translates DPMs "" or null String as the Empty TTURL String representation. Any + * other String is left as is. + */ + public String toStoRM(String s) { + + if ((s == null) || (s.equals(""))) + return TTURL.makeEmpty().toString(); + return s; + } +} diff --git a/src/main/java/it/grid/storm/persistence/converter/TransferProtocolListConverter.java b/src/main/java/it/grid/storm/persistence/converter/TransferProtocolListConverter.java new file mode 100644 index 000000000..9bb877aa1 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/TransferProtocolListConverter.java @@ -0,0 +1,66 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License.
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.namespace.model.Protocol; + +import java.util.Iterator; +import java.util.List; +import java.util.ArrayList; + +/** + * Package private auxiliary class used to convert between the DB raw data representation and StoRM + * s Object model list of transfer protocols. + * + */ + +public class TransferProtocolListConverter { + + /** + * Method that returns a List of Uppercase Strings used in the DB to represent the given + * TURLPrefix. An empty List is returned in case the conversion does not succeed, a null + * TURLPrefix is supplied, or its size is 0. + */ + public static List toDB(TURLPrefix turlPrefix) { + + List result = new ArrayList(); + Protocol protocol; + for (Iterator it = turlPrefix.getDesiredProtocols().iterator(); it.hasNext();) { + protocol = it.next(); + result.add(protocol.getSchema()); + } + return result; + } + + /** + * Method that returns a TURLPrefix of transfer protocol. If the translation cannot take place, a + * TURLPrefix of size 0 is returned. Likewise if a null List is supplied. 
+ */ + public static TURLPrefix toSTORM(List listOfProtocol) { + + TURLPrefix turlPrefix = new TURLPrefix(); + Protocol protocol = null; + for (Iterator i = listOfProtocol.iterator(); i.hasNext();) { + protocol = Protocol.getProtocol(i.next()); + if (!(protocol.equals(Protocol.UNKNOWN))) + turlPrefix.addProtocol(protocol); + } + return turlPrefix; + } +} diff --git a/src/main/java/it/grid/storm/persistence/dao/AbstractDAO.java b/src/main/java/it/grid/storm/persistence/dao/AbstractDAO.java index 7f773e373..2111cd7fa 100644 --- a/src/main/java/it/grid/storm/persistence/dao/AbstractDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/AbstractDAO.java @@ -17,11 +17,6 @@ package it.grid.storm.persistence.dao; -import it.grid.storm.persistence.DataSourceConnectionFactory; -import it.grid.storm.persistence.PersistenceDirector; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.exceptions.PersistenceException; - import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; @@ -30,156 +25,71 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import it.grid.storm.persistence.pool.DefaultDatabaseConnectionPool; + public abstract class AbstractDAO { - private static final Logger log = LoggerFactory.getLogger(AbstractDAO.class); - - private DataSourceConnectionFactory connFactory; - - public AbstractDAO() { - connFactory = PersistenceDirector.getConnectionFactory(); - } - - protected void commit(Connection conn) { - - try { - conn.commit(); - conn.setAutoCommit(true); - } catch (SQLException e) { - log.error(e.getMessage(), e); - } - } - - protected Connection getConnection() throws DataAccessException { - - Connection conn = null; - try { - conn = connFactory.borrowConnection(); - } catch (PersistenceException ex) { - throw new DataAccessException(ex); - } - return conn; - } - - protected Statement getStatement(Connection conn) throws DataAccessException { - - Statement stat = null; - if 
(conn == null) { - throw new DataAccessException( - "No Connection available to create a Statement"); - } else { - try { - stat = conn.createStatement(); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException(e); - } - } - return stat; - } - - /** - * Release a connection Accessor method. - * - * @param resultSet - * ResultSet - * @param statement - * Statement - * @param connection - * Connection - * @throws DataAccessException - */ - protected void releaseConnection(ResultSet resultSet, Statement statement, - Connection connection) throws DataAccessException { - - // Release the ResultSet - closeResultSet(resultSet); - - // Close the statement - closeStatement(statement); - - // Release the connection - closeConnection(connection); - } - - /** - * Release a connection and a list of statements and result sets Accessor - * method. - * - * @param resultSets - * @param statements - * @param connection - * @throws DataAccessException - */ - protected void releaseConnection(ResultSet[] resultSets, - Statement[] statements, Connection connection) throws DataAccessException { - - // Release the ResultSets - if (resultSets != null) { - for (ResultSet resultSet : resultSets) { - closeResultSet(resultSet); - } - } - // Close the statement - if (statements != null) { - for (Statement statement : statements) { - closeStatement(statement); - } - } - // Release the connection - closeConnection(connection); - } - - private void closeResultSet(ResultSet resultSet) throws DataAccessException { - - if (resultSet != null) { - try { - resultSet.close(); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException(e); - } - } - } - - private void closeStatement(Statement statement) throws DataAccessException { - - if (statement != null) { - try { - statement.close(); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException(e); - } - } - } - - private void 
closeConnection(Connection connection) - throws DataAccessException { - - if (connection != null) { - try { - connFactory.giveBackConnection(connection); - } catch (PersistenceException e) { - log.error(e.getMessage(), e); - throw new DataAccessException(e); - } - } - } - - /** - * @param conn - */ - protected void rollback(Connection conn) { - - try { - - conn.rollback(); - conn.setAutoCommit(true); - - } catch (SQLException e) { - log.error(e.getMessage(), e); - } - } + private static final Logger log = LoggerFactory.getLogger(AbstractDAO.class); + + private final DefaultDatabaseConnectionPool connectionPool; + + public AbstractDAO(DefaultDatabaseConnectionPool connectionPool) { + + this.connectionPool = connectionPool; + } + + protected Connection getConnection() throws SQLException { + + Connection con = connectionPool.getConnection(); + con.setAutoCommit(true); + return con; + } + + protected Connection getManagedConnection() throws SQLException { + + Connection con = connectionPool.getConnection(); + con.setAutoCommit(false); + return con; + } + + protected void closeResultSet(ResultSet resultSet) { + + try { + if (resultSet != null) { + resultSet.close(); + } + } catch (SQLException e) { + handleSQLException(e); + } + } + + protected void closeStatement(Statement statement) { + + try { + if (statement != null) { + statement.close(); + } + } catch (SQLException e) { + handleSQLException(e); + } + } + + protected void closeConnection(Connection connection) { + + try { + if (connection != null) { + connection.close(); + } + } catch (SQLException e) { + handleSQLException(e); + } + } + + private void handleSQLException(SQLException e) { + + log.error("SQL Error: {}, SQLState: {}, VendorError: {}.", e.getMessage(), e.getSQLState(), + e.getErrorCode(), e); + e.printStackTrace(); + } } diff --git a/src/main/java/it/grid/storm/persistence/dao/BoLChunkDAO.java b/src/main/java/it/grid/storm/persistence/dao/BoLChunkDAO.java new file mode 100644 index 
000000000..f0f242ad3 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/dao/BoLChunkDAO.java @@ -0,0 +1,40 @@ +package it.grid.storm.persistence.dao; + +import java.util.Collection; + +import it.grid.storm.persistence.model.BoLChunkDataTO; +import it.grid.storm.persistence.model.ReducedBoLChunkDataTO; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TStatusCode; + +public interface BoLChunkDAO { + + void addChild(BoLChunkDataTO to); + + void addNew(BoLChunkDataTO to, String clientDn); + + void update(BoLChunkDataTO to); + + void updateIncomplete(ReducedBoLChunkDataTO to); + + Collection find(TRequestToken requestToken); + + Collection findReduced(TRequestToken requestToken); + + Collection findReduced(TRequestToken requestToken, int[] surlUniqueIDs, + String[] surls); + + Collection findReduced(String griduser, int[] surlUniqueIDs, + String[] surls); + + int updateStatus(BoLChunkDataTO to, TStatusCode status, String explanation); + + int releaseExpiredAndSuccessfulRequests(); + + void updateStatusOnMatchingStatus(TRequestToken requestToken, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation); + + Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn); + + Collection find(int[] surlsUniqueIDs, String[] surlsArray); +} diff --git a/src/main/java/it/grid/storm/persistence/dao/PtGChunkDAO.java b/src/main/java/it/grid/storm/persistence/dao/PtGChunkDAO.java index 7516763b5..aa0781619 100644 --- a/src/main/java/it/grid/storm/persistence/dao/PtGChunkDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/PtGChunkDAO.java @@ -18,19 +18,50 @@ package it.grid.storm.persistence.dao; import java.util.Collection; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.PtGChunkTO; + +import it.grid.storm.persistence.model.PtGChunkDataTO; +import it.grid.storm.persistence.model.ReducedPtGChunkDataTO; import it.grid.storm.srm.types.TRequestToken; 
+import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TStatusCode; public interface PtGChunkDAO { - public PtGChunkTO getPtGChunkDataById(Long ssId) throws DataAccessException; + public void addChild(PtGChunkDataTO to); + + public void addNew(PtGChunkDataTO to, String clientDn); + + public void update(PtGChunkDataTO to); + + public void updateIncomplete(ReducedPtGChunkDataTO chunkTO); + + public PtGChunkDataTO refresh(long primaryKey); + + public Collection find(TRequestToken requestToken); + + public Collection findReduced(TRequestToken requestToken); + + public Collection findReduced(TRequestToken requestToken, + int[] surlsUniqueIDs, String[] surlsArray); + + public Collection findReduced(String griduser, int[] surlUniqueIDs, + String[] surls); + + public void fail(PtGChunkDataTO auxTO); + + public int numberInSRM_FILE_PINNED(int surlUniqueID); + + public int count(int surlUniqueID, TStatusCode status); + + public Collection transitExpiredSRM_FILE_PINNED(); + + public void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids); - public void addPtGChunkData(PtGChunkTO ptgChunkTO) throws DataAccessException; + public void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids, TRequestToken token); - public Collection getPtGChunksDataByToken(TRequestToken token) - throws DataAccessException; + public void updateStatus(TRequestToken requestToken, int[] surlUniqueIDs, + String[] surls, TStatusCode statusCode, String explanation); - public void removePtGChunksData(PtGChunkTO ptgChunkTO) - throws DataAccessException; + public void updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation); } diff --git a/src/main/java/it/grid/storm/persistence/dao/PtPChunkDAO.java b/src/main/java/it/grid/storm/persistence/dao/PtPChunkDAO.java index bd3fe0a19..4493b9b71 100644 --- a/src/main/java/it/grid/storm/persistence/dao/PtPChunkDAO.java +++ 
b/src/main/java/it/grid/storm/persistence/dao/PtPChunkDAO.java @@ -18,20 +18,43 @@ package it.grid.storm.persistence.dao; import java.util.Collection; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.PtPChunkTO; +import java.util.Map; + +import it.grid.storm.persistence.model.PtPChunkDataTO; +import it.grid.storm.persistence.model.ReducedPtPChunkDataTO; import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TStatusCode; public interface PtPChunkDAO { - public PtPChunkTO getPtGChunkDataById(Long ssId) throws DataAccessException; + public void update(PtPChunkDataTO to); + + public void updateIncomplete(ReducedPtPChunkDataTO chunkTO); + + public Collection find(TRequestToken requestToken); + + public Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn); + + public int fail(PtPChunkDataTO auxTO); + + public Map getExpiredSRM_SPACE_AVAILABLE(); + + public Map getExpired(TStatusCode status); + + public int transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED(Collection ids); + + public int transitLongTimeInProgressRequestsToStatus(long expirationTime, TStatusCode status, + String explanation); + + public int updateStatus(Collection ids, TStatusCode fromStatus, TStatusCode toStatus, + String explanation); - public void addPtGChunkData(PtPChunkTO ptpChunkData) - throws DataAccessException; + public int updateStatus(TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, + TStatusCode statusCode, String explanation); - public Collection getPtPChunksDataByToken(TRequestToken token) - throws DataAccessException; + public int updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation); - public void removePtGChunksData(PtPChunkTO ptpChunkData) - throws DataAccessException; + public int updateStatusOnMatchingStatus(TRequestToken requestToken, int[] surlsUniqueIDs, + String[] surls, 
TStatusCode expectedStatusCode, TStatusCode newStatusCode); } diff --git a/src/main/java/it/grid/storm/persistence/dao/RequestSummaryDAO.java b/src/main/java/it/grid/storm/persistence/dao/RequestSummaryDAO.java index c9eb63a8a..a3a9d308d 100644 --- a/src/main/java/it/grid/storm/persistence/dao/RequestSummaryDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/RequestSummaryDAO.java @@ -17,17 +17,43 @@ package it.grid.storm.persistence.dao; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.RequestSummaryTO; +import java.util.Collection; + +import it.grid.storm.persistence.model.RequestSummaryDataTO; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TRequestType; +import it.grid.storm.srm.types.TStatusCode; public interface RequestSummaryDAO { - public RequestSummaryTO getRequestSummaryById(Long ssId) - throws DataAccessException; + Collection fetchNewRequests(int limit); + + void failRequest(long requestId, String explanation); + + void failPtGRequest(long requestId, String explanation); + + void failPtPRequest(long requestId, String explanation); + + void updateGlobalStatus(TRequestToken requestToken, TStatusCode status, String explanation); + + void updateGlobalStatusOnMatchingGlobalStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation); + + void updateGlobalStatusPinFileLifetime(TRequestToken requestToken, TStatusCode status, + String explanation); + + void abortRequest(TRequestToken requestToken); + + void abortInProgressRequest(TRequestToken requestToken); + + void abortChunksOfInProgressRequest(TRequestToken requestToken, Collection surls); + + TRequestType getRequestType(TRequestToken requestToken); + + RequestSummaryDataTO find(TRequestToken requestToken); + + Collection purgeExpiredRequests(long expiredRequestTime, int purgeSize); - public void addRequestSummary(RequestSummaryTO rsd) - throws 
DataAccessException; + int getNumberExpired(); - public void removeRequestSummary(RequestSummaryTO rsd) - throws DataAccessException; } diff --git a/src/main/java/it/grid/storm/persistence/dao/SURLStatusDAO.java b/src/main/java/it/grid/storm/persistence/dao/SURLStatusDAO.java new file mode 100644 index 000000000..28108caea --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/dao/SURLStatusDAO.java @@ -0,0 +1,40 @@ +package it.grid.storm.persistence.dao; + +import java.util.List; +import java.util.Map; + +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; + +public interface SURLStatusDAO { + + boolean abortActivePtGsForSURL(GridUserInterface user, TSURL surl, String explanation); + + boolean abortActivePtPsForSURL(GridUserInterface user, TSURL surl, String explanation); + + Map getPinnedSURLsForUser(GridUserInterface user, List surls); + + Map getPinnedSURLsForUser(GridUserInterface user, TRequestToken token, + List surls); + + Map getSURLStatuses(TRequestToken token); + + Map getSURLStatuses(TRequestToken token, List surls); + + int markSURLsReadyForRead(TRequestToken token, List surls); + + void releaseSURL(TSURL surl); + + void releaseSURLs(GridUserInterface user, List surls); + + void releaseSURLs(List surls); + + void releaseSURLs(TRequestToken token, List surls); + + boolean surlHasOngoingPtGs(TSURL surl); + + boolean surlHasOngoingPtPs(TSURL surl, TRequestToken token); + +} diff --git a/src/main/java/it/grid/storm/persistence/dao/StorageAreaDAO.java b/src/main/java/it/grid/storm/persistence/dao/StorageAreaDAO.java deleted file mode 100644 index 6ae104bc4..000000000 --- a/src/main/java/it/grid/storm/persistence/dao/StorageAreaDAO.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.dao; - -public interface StorageAreaDAO { -} diff --git a/src/main/java/it/grid/storm/persistence/dao/TapeRecallDAO.java b/src/main/java/it/grid/storm/persistence/dao/TapeRecallDAO.java index 60c9c51cb..58d93062e 100644 --- a/src/main/java/it/grid/storm/persistence/dao/TapeRecallDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/TapeRecallDAO.java @@ -22,181 +22,170 @@ import java.util.Date; import java.util.List; +import java.util.Optional; import java.util.UUID; /** * Tape Recall Data Access Object (DAO) */ -public abstract class TapeRecallDAO extends AbstractDAO { - - /** - * - * @return - * @throws DataAccessException - */ - public abstract int getNumberInProgress() throws DataAccessException; - - /** - * - * @param voName - * @return - * @throws DataAccessException - */ - public abstract int getNumberInProgress(String voName) - throws DataAccessException; - - /** - * - * @return - * @throws DataAccessException - */ - public abstract int getNumberQueued() throws DataAccessException; - - /** - * - * @param voName - * @return - * @throws DataAccessException - */ - public abstract int getNumberQueued(String voName) throws DataAccessException; - - /** - * - * @return - * @throws DataAccessException - */ - public abstract int getReadyForTakeOver() throws DataAccessException; - - /** - * - * @param voName - * @return - * @throws DataAccessException - */ - public 
abstract int getReadyForTakeOver(String voName) - throws DataAccessException; - - /** - * @param taskId - * @param requestToken - * @return - * @throws DataAccessException - */ - public abstract TapeRecallTO getTask(UUID taskId, String requestToken) - throws DataAccessException; - - /** - * @param groupTaskId - * @return - * @throws DataAccessException - */ - public abstract List getGroupTasks(UUID groupTaskId) - throws DataAccessException; - - /** - * Verifies that a recall task with the given taskId and request token exists - * on the database - * - * @param taskId - * @param requestToken - * @return true if the recall task exists - * @throws DataAccessException - */ - public abstract boolean existsTask(UUID taskId, String requestToken) - throws DataAccessException; - - /** - * @param groupTaskId - * @return - * @throws DataAccessException - */ - public abstract boolean existsGroupTask(UUID groupTaskId) - throws DataAccessException; - - /** - * Method called by a garbage collector that removes all tape recalls that are - * not in QUEUED (1) or IN_PROGRESS (2) status - * - * @param expirationTime seconds must pass to consider the request as expired - * @param delete at most numMaxToPurge tasks - * @return the amount of tasks deleted - * @throws DataAccessException - */ - public abstract int purgeCompletedTasks(long expirationTime, int numMaxToPurge) - throws DataAccessException; - - /** - * @param taskId - * @param newValue - * @throws DataAccessException - */ - public abstract void setGroupTaskRetryValue(UUID groupTaskId, int value) - throws DataAccessException; - - /** - * - * @return - * @throws DataAccessException - */ - public abstract TapeRecallTO takeoverTask() throws DataAccessException; - - /** - * - * @param voName - * @return - * @throws DataAccessException - */ - public abstract TapeRecallTO takeoverTask(String voName) - throws DataAccessException; - - /** - * Performs the take-over of max numberOfTaks tasks possibly returning more - * than one file 
recall task for some files - * - * @param numberOfTaks - * @return - * @throws DataAccessException - */ - public abstract List takeoverTasksWithDoubles(int numberOfTaks) - throws DataAccessException; - - /** - * - * @param numberOfTaks - * @param voName - * @return - * @throws DataAccessException - */ - public abstract List takeoverTasksWithDoubles(int numberOfTaks, - String voName) throws DataAccessException; - - /** - * @param task - * @param statuses - * @param proposedGroupTaskId - * @return - * @throws DataAccessException - */ - public abstract UUID insertCloneTask(TapeRecallTO task, int[] statuses, - UUID proposedGroupTaskId) throws DataAccessException; - - /** - * @param groupTaskId - * @param statusId - * @return - * @throws DataAccessException - */ - public abstract boolean setGroupTaskStatus(UUID groupTaskId, int statusId, - Date timestamp) throws DataAccessException; - - /** - * - * @param numberOfTaks - * @return - * @throws DataAccessException - */ - public abstract List getAllInProgressTasks(int numberOfTaks) - throws DataAccessException; - -} \ No newline at end of file +public interface TapeRecallDAO { + + /** + * + * @return + * @throws DataAccessException + */ + public int getNumberInProgress() throws DataAccessException; + + /** + * + * @param voName + * @return + * @throws DataAccessException + */ + public int getNumberInProgress(String voName) throws DataAccessException; + + /** + * + * @return + * @throws DataAccessException + */ + public int getNumberQueued() throws DataAccessException; + + /** + * + * @param voName + * @return + * @throws DataAccessException + */ + public int getNumberQueued(String voName) throws DataAccessException; + + /** + * + * @return + * @throws DataAccessException + */ + public int getReadyForTakeOver() throws DataAccessException; + + /** + * + * @param voName + * @return + * @throws DataAccessException + */ + public int getReadyForTakeOver(String voName) throws DataAccessException; + + /** + * @param taskId + * 
@param requestToken + * @return + * @throws DataAccessException + */ + public Optional getTask(UUID taskId, String requestToken) throws DataAccessException; + + /** + * @param groupTaskId + * @return + * @throws DataAccessException + */ + public List getGroupTasks(UUID groupTaskId) throws DataAccessException; + + /** + * Verifies that a recall task with the given taskId and request token exists on the database + * + * @param taskId + * @param requestToken + * @return true if the recall task exists + * @throws DataAccessException + */ + public boolean existsTask(UUID taskId, String requestToken) throws DataAccessException; + + /** + * @param groupTaskId + * @return + * @throws DataAccessException + */ + public boolean existsGroupTask(UUID groupTaskId) throws DataAccessException; + + /** + * Method called by a garbage collector that removes all tape recalls that are not in QUEUED (1) + * or IN_PROGRESS (2) status + * + * @param expirationTime seconds must pass to consider the request as expired + * @param delete at most numMaxToPurge tasks + * @return the amount of tasks deleted + * @throws DataAccessException + */ + public int purgeCompletedTasks(long expirationTime, int numMaxToPurge) throws DataAccessException; + + /** + * @param taskId + * @param newValue + * @throws DataAccessException + */ + public void setGroupTaskRetryValue(UUID groupTaskId, int value) throws DataAccessException; + + /** + * + * @return + * @throws DataAccessException + */ + public TapeRecallTO takeoverTask() throws DataAccessException; + + /** + * + * @param voName + * @return + * @throws DataAccessException + */ + public TapeRecallTO takeoverTask(String voName) throws DataAccessException; + + /** + * Performs the take-over of max numberOfTaks tasks possibly returning more than one file recall + * task for some files + * + * @param numberOfTaks + * @return + * @throws DataAccessException + */ + public List takeoverTasksWithDoubles(int numberOfTaks) throws DataAccessException; + + /** + * + * 
@param numberOfTaks + * @param voName + * @return + * @throws DataAccessException + */ + public List takeoverTasksWithDoubles(int numberOfTaks, String voName) + throws DataAccessException; + + /** + * @param task + * @param statuses + * @param proposedGroupTaskId + * @return + * @throws DataAccessException + */ + public UUID insertCloneTask(TapeRecallTO task, int[] statuses, UUID proposedGroupTaskId) + throws DataAccessException; + + /** + * @param groupTaskId + * @param statusId + * @return + * @throws DataAccessException + */ + public boolean setGroupTaskStatus(UUID groupTaskId, int statusId, Date timestamp) + throws DataAccessException; + + /** + * + * @param numberOfTaks + * @return + * @throws DataAccessException + */ + public List getAllInProgressTasks(int numberOfTaks) throws DataAccessException; + +} diff --git a/src/main/java/it/grid/storm/persistence/dao/VolatileAndJiTDAO.java b/src/main/java/it/grid/storm/persistence/dao/VolatileAndJiTDAO.java new file mode 100644 index 000000000..9fdb53f22 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/dao/VolatileAndJiTDAO.java @@ -0,0 +1,31 @@ +package it.grid.storm.persistence.dao; + +import java.util.List; + +public interface VolatileAndJiTDAO { + + public void addJiT(String filename, int uid, int gid, int acl, long start, long pinLifetime); + + public void addVolatile(String filename, long start, long fileLifetime); + + public boolean exists(String filename); + + public void forceUpdateJiT(String filename, int uid, int acl, long start, long pinLifetime); + + public int numberJiT(String filename, int uid, int acl); + + public int numberVolatile(String filename); + + public void removeAllJiTsOn(String filename); + + public List removeExpired(long time); + + public void updateJiT(String filename, int uid, int acl, long start, long pinLifetime); + + public void updateVolatile(String filename, long start, long fileLifetime); + + public void updateVolatile(String fileName, long fileStart); + + public List 
volatileInfoOn(String filename); + +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InfrastructureException.java b/src/main/java/it/grid/storm/persistence/exceptions/InfrastructureException.java deleted file mode 100644 index bae662575..000000000 --- a/src/main/java/it/grid/storm/persistence/exceptions/InfrastructureException.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.exceptions; - -/** - * This exception is used to mark (fatal) failures in infrastructure and system - * code. 
- * - * @author Christian Bauer - */ -public class InfrastructureException extends RuntimeException { - - public InfrastructureException() { - - } - - public InfrastructureException(String message) { - - super(message); - } - - public InfrastructureException(String message, Throwable cause) { - - super(message, cause); - } - - public InfrastructureException(Throwable cause) { - - super(cause); - } -} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLDataAttributesException.java new file mode 100644 index 000000000..2ef423b5c --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLDataAttributesException.java @@ -0,0 +1,88 @@ +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TTURL; + +/** + * @author Michele Dibenedetto + * + */ +public class InvalidBoLDataAttributesException extends InvalidFileTransferDataAttributesException { + + private static final long serialVersionUID = 8113403994527678088L; + // booleans that indicate whether the corresponding variable is null + protected boolean nullLifeTime; + protected boolean nullDirOption; + protected boolean nullFileSize; + + public InvalidBoLDataAttributesException(TSURL fromSURL, TLifeTimeInSeconds lifeTime, + TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize, + TReturnStatus status, TTURL transferURL) { + + super(fromSURL, transferProtocols, status, transferURL); + init(lifeTime, dirOption, fileSize); + } + + public InvalidBoLDataAttributesException(TSURL fromSURL, TLifeTimeInSeconds lifeTime, + TDirOption dirOption, TURLPrefix transferProtocols, 
TSizeInBytes fileSize, + TReturnStatus status, TTURL transferURL, String message) { + + super(fromSURL, transferProtocols, status, transferURL, message); + init(lifeTime, dirOption, fileSize); + } + + public InvalidBoLDataAttributesException(TSURL fromSURL, TLifeTimeInSeconds lifeTime, + TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize, + TReturnStatus status, TTURL transferURL, Throwable cause) { + + super(fromSURL, transferProtocols, status, transferURL, cause); + init(lifeTime, dirOption, fileSize); + } + + public InvalidBoLDataAttributesException(TSURL fromSURL, TLifeTimeInSeconds lifeTime, + TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize, + TReturnStatus status, TTURL transferURL, String message, Throwable cause) { + + super(fromSURL, transferProtocols, status, transferURL, message, cause); + init(lifeTime, dirOption, fileSize); + } + + private void init(TLifeTimeInSeconds lifeTime, TDirOption dirOption, TSizeInBytes fileSize) { + + nullLifeTime = lifeTime == null; + nullDirOption = dirOption == null; + nullFileSize = fileSize == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidBoLDataAttributesException [nullLifeTime="); + builder.append(nullLifeTime); + builder.append(", nullDirOption="); + builder.append(nullDirOption); + builder.append(", nullFileSize="); + builder.append(nullFileSize); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLPersistentChunkDataAttributesException.java 
b/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLPersistentChunkDataAttributesException.java new file mode 100644 index 000000000..e37f5bac6 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLPersistentChunkDataAttributesException.java @@ -0,0 +1,82 @@ +/* + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by + * applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + * OF ANY KIND, either express or implied. See the License for the specific + * language governing permissions and limitations under the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TTURL; + +/** + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * PtPChunkData are invalid, that is if any of the following is _null_: requestToken, toSURL, + * lifetime, fileStorageType, spaceToken, knownSizeOfThisFile, TURLPrefix transferProtocols, + * overwriteOption, fileSize, status, transferURL. 
+ * + * @author EGRID - ICTP Trieste + * @date June, 2005 + * @version 2.0 + */ +public class InvalidBoLPersistentChunkDataAttributesException + extends InvalidBoLDataAttributesException { + + private static final long serialVersionUID = -5117535717125685975L; + /** + * booleans that indicate whether the corresponding variable is null + */ + boolean nullRequestToken; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. + */ + public InvalidBoLPersistentChunkDataAttributesException(TRequestToken requestToken, + TSURL fromSURL, TLifeTimeInSeconds lifeTime, TDirOption dirOption, + TURLPrefix desiredProtocols, TSizeInBytes fileSize, TReturnStatus status, TTURL transferURL) { + + super(fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, transferURL); + nullRequestToken = requestToken == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidBoLPersistentChunkDataAttributesException [nullRequestToken="); + builder.append(nullRequestToken); + builder.append(", nullLifeTime="); + builder.append(nullLifeTime); + builder.append(", nullDirOption="); + builder.append(nullDirOption); + builder.append(", nullFileSize="); + builder.append(nullFileSize); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidFileTransferDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidFileTransferDataAttributesException.java new file mode 100644 index 000000000..fedb042b5 --- /dev/null +++ 
b/src/main/java/it/grid/storm/persistence/exceptions/InvalidFileTransferDataAttributesException.java @@ -0,0 +1,83 @@ +/* + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by + * applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + * OF ANY KIND, either express or implied. See the License for the specific + * language governing permissions and limitations under the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TTURL; + +/** + * @author Michele Dibenedetto + */ +public class InvalidFileTransferDataAttributesException + extends InvalidSurlRequestDataAttributesException { + + private static final long serialVersionUID = 4416318501544415810L; + protected boolean nullTransferProtocols; + protected boolean nullTransferURL; + + public InvalidFileTransferDataAttributesException(TSURL SURL, TURLPrefix transferProtocols, + TReturnStatus status, TTURL transferURL) { + + super(SURL, status); + init(transferProtocols, transferURL); + } + + public InvalidFileTransferDataAttributesException(TSURL SURL, TURLPrefix transferProtocols, + TReturnStatus status, TTURL transferURL, String message) { + + super(SURL, status, message); + init(transferProtocols, transferURL); + } + + public InvalidFileTransferDataAttributesException(TSURL SURL, TURLPrefix transferProtocols, + TReturnStatus status, TTURL transferURL, Throwable cause) { + + super(SURL, status, cause); + init(transferProtocols, transferURL); + } + + public 
InvalidFileTransferDataAttributesException(TSURL SURL, TURLPrefix transferProtocols, + TReturnStatus status, TTURL transferURL, String message, Throwable cause) { + + super(SURL, status, message, cause); + init(transferProtocols, transferURL); + } + + private void init(TURLPrefix transferProtocols, TTURL transferURL) { + + nullTransferProtocols = transferProtocols == null; + nullTransferURL = transferURL == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidFileTransferDataAttributesException [nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGDataAttributesException.java new file mode 100644 index 000000000..1b71c9b7d --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGDataAttributesException.java @@ -0,0 +1,84 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TTURL; + +/** + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * PtGChunkData are invalid, that is if any of the following is _null_: requestToken, fromSURL, + * lifeTime, numOfLevels, transferProtocols, fileSize, status, transferURL. + * + * @author EGRID - ICTP Trieste + * @date March 23rd, 2005 + * @version 3.0 + */ +public class InvalidPtGDataAttributesException extends InvalidFileTransferDataAttributesException { + + private static final long serialVersionUID = -3484929474636108262L; + // booleans that indicate whether the corresponding variable is null + protected boolean nullLifeTime; + protected boolean nullDirOption; + protected boolean nullFileSize; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. 
+ */ + public InvalidPtGDataAttributesException(TSURL fromSURL, TLifeTimeInSeconds lifeTime, + TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize, + TReturnStatus status, TTURL transferURL) { + + super(fromSURL, transferProtocols, status, transferURL); + nullLifeTime = lifeTime == null; + nullDirOption = dirOption == null; + nullFileSize = fileSize == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidPtGChunkDataAttributesException [nullLifeTime="); + builder.append(nullLifeTime); + builder.append(", nullDirOption="); + builder.append(nullDirOption); + builder.append(", nullFileSize="); + builder.append(nullFileSize); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGPersistentChunkDataAttributesException.java new file mode 100644 index 000000000..666272f7f --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGPersistentChunkDataAttributesException.java @@ -0,0 +1,82 @@ +/* + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by + * applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + * OF ANY KIND, either express or implied. See the License for the specific + * language governing permissions and limitations under the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TTURL; + +/** + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * PtPChunkData are invalid, that is if any of the following is _null_: requestToken, toSURL, + * lifetime, fileStorageType, spaceToken, knownSizeOfThisFile, TURLPrefix transferProtocols, + * overwriteOption, fileSize, status, transferURL. + * + * @author EGRID - ICTP Trieste + * @date June, 2005 + * @version 2.0 + */ +public class InvalidPtGPersistentChunkDataAttributesException + extends InvalidPtGDataAttributesException { + + private static final long serialVersionUID = -5117535717125685975L; + /** + * booleans that indicate whether the corresponding variable is null + */ + boolean nullRequestToken; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. 
+ */ + public InvalidPtGPersistentChunkDataAttributesException(TRequestToken requestToken, + TSURL fromSURL, TLifeTimeInSeconds lifeTime, TDirOption dirOption, + TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, + TTURL transferURL) { + + super(fromSURL, lifeTime, dirOption, transferProtocols, fileSize, status, transferURL); + nullRequestToken = requestToken == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidPtGPersistentChunkDataAttributesException [nullRequestToken="); + builder.append(nullRequestToken); + builder.append(", nullLifeTime="); + builder.append(nullLifeTime); + builder.append(", nullDirOption="); + builder.append(nullDirOption); + builder.append(", nullFileSize="); + builder.append(nullFileSize); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPDataAttributesException.java new file mode 100644 index 000000000..4577c0582 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPDataAttributesException.java @@ -0,0 +1,125 @@ +/* + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by + * applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + * OF ANY KIND, either express or implied. See the License for the specific + * language governing permissions and limitations under the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TSpaceToken; +import it.grid.storm.srm.types.TTURL; + +/** + * @author Michele Dibenedetto + */ +public class InvalidPtPDataAttributesException extends InvalidFileTransferDataAttributesException { + + /** + * + */ + private static final long serialVersionUID = 1051060981188652979L; + protected boolean nullSpaceToken; + protected boolean nullPinLifetime; + protected boolean nullFileLifetime; + protected boolean nullFileStorageType; + protected boolean nullKnownSizeOfThisFile; + protected boolean nullOverwriteOption; + + public InvalidPtPDataAttributesException(TSURL toSURL, TLifeTimeInSeconds fileLifetime, + TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType, TSpaceToken spaceToken, + TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, + TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL) { + + super(toSURL, transferProtocols, status, transferURL); + init(spaceToken, fileLifetime, pinLifetime, fileStorageType, knownSizeOfThisFile, + overwriteOption); + } + + public InvalidPtPDataAttributesException(TSURL toSURL, TLifeTimeInSeconds fileLifetime, + TLifeTimeInSeconds pinLifetime, TFileStorageType 
fileStorageType, TSpaceToken spaceToken, + TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, + TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL, String message) { + + super(toSURL, transferProtocols, status, transferURL, message); + init(spaceToken, fileLifetime, pinLifetime, fileStorageType, knownSizeOfThisFile, + overwriteOption); + } + + public InvalidPtPDataAttributesException(TSURL toSURL, TLifeTimeInSeconds fileLifetime, + TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType, TSpaceToken spaceToken, + TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, + TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL, Throwable cause) { + + super(toSURL, transferProtocols, status, transferURL, cause); + init(spaceToken, fileLifetime, pinLifetime, fileStorageType, knownSizeOfThisFile, + overwriteOption); + } + + public InvalidPtPDataAttributesException(TSURL toSURL, TLifeTimeInSeconds fileLifetime, + TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType, TSpaceToken spaceToken, + TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, + TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL, String message, + Throwable cause) { + + super(toSURL, transferProtocols, status, transferURL, message, cause); + init(spaceToken, fileLifetime, pinLifetime, fileStorageType, knownSizeOfThisFile, + overwriteOption); + } + + private void init(TSpaceToken spaceToken, TLifeTimeInSeconds fileLifetime, + TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType, + TSizeInBytes knownSizeOfThisFile, TOverwriteMode overwriteOption) { + + nullSpaceToken = spaceToken == null; + nullPinLifetime = pinLifetime == null; + nullFileLifetime = fileLifetime == null; + nullFileStorageType = fileStorageType == null; + nullKnownSizeOfThisFile = knownSizeOfThisFile == null; + nullOverwriteOption = overwriteOption == null; + } + + /* + * (non-Javadoc) + * + * @see 
java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidPtPDataAttributesException [nullSpaceToken="); + builder.append(nullSpaceToken); + builder.append(", nullPinLifetime="); + builder.append(nullPinLifetime); + builder.append(", nullFileLifetime="); + builder.append(nullFileLifetime); + builder.append(", nullFileStorageType="); + builder.append(nullFileStorageType); + builder.append(", nullKnownSizeOfThisFile="); + builder.append(nullKnownSizeOfThisFile); + builder.append(", nullOverwriteOption="); + builder.append(nullOverwriteOption); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPPersistentChunkDataAttributesException.java new file mode 100644 index 000000000..f98418d59 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPPersistentChunkDataAttributesException.java @@ -0,0 +1,92 @@ +/* + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by + * applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + * OF ANY KIND, either express or implied. 
See the License for the specific + * language governing permissions and limitations under the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TSpaceToken; +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TTURL; + +/** + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * PtPChunkData are invalid, that is if any of the following is _null_: requestToken, toSURL, + * lifetime, fileStorageType, spaceToken, knownSizeOfThisFile, TURLPrefix transferProtocols, + * overwriteOption, fileSize, status, transferURL. + * + * @author EGRID - ICTP Trieste + * @date June, 2005 + * @version 2.0 + */ +public class InvalidPtPPersistentChunkDataAttributesException + extends InvalidPtPDataAttributesException { + + private static final long serialVersionUID = -5117535717125685975L; + /** + * booleans that indicate whether the corresponding variable is null + */ + boolean nullRequestToken; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. 
+ */ + public InvalidPtPPersistentChunkDataAttributesException(TRequestToken requestToken, TSURL toSURL, + TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime, + TFileStorageType fileStorageType, TSpaceToken spaceToken, TSizeInBytes knownSizeOfThisFile, + TURLPrefix transferProtocols, TOverwriteMode overwriteOption, TReturnStatus status, + TTURL transferURL) { + + super(toSURL, fileLifetime, pinLifetime, fileStorageType, spaceToken, knownSizeOfThisFile, + transferProtocols, overwriteOption, status, transferURL); + nullRequestToken = requestToken == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidPtPPersistentChunkDataAttributesException [nullRequestToken="); + builder.append(nullRequestToken); + builder.append(", nullSpaceToken="); + builder.append(nullSpaceToken); + builder.append(", nullPinLifetime="); + builder.append(nullPinLifetime); + builder.append(", nullFileLifetime="); + builder.append(nullFileLifetime); + builder.append(", nullFileStorageType="); + builder.append(nullFileStorageType); + builder.append(", nullKnownSizeOfThisFile="); + builder.append(nullKnownSizeOfThisFile); + builder.append(", nullOverwriteOption="); + builder.append(nullOverwriteOption); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedBoLChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedBoLChunkDataAttributesException.java similarity index 51% rename from 
src/main/java/it/grid/storm/catalogs/InvalidReducedBoLChunkDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedBoLChunkDataAttributesException.java index ebcce1ef8..f78d76977 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidReducedBoLChunkDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedBoLChunkDataAttributesException.java @@ -15,14 +15,14 @@ * the License. */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TReturnStatus; /** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of ReducedBoLChunkData are invalid, that is if any is _null_. + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * ReducedBoLChunkData are invalid, that is if any is _null_. * * @author EGRID - ICTP Trieste * @date November, 2006 @@ -30,32 +30,30 @@ */ public class InvalidReducedBoLChunkDataAttributesException extends Exception { - private static final long serialVersionUID = -8145580437017768234L; - - // booleans that indicate whether the corresponding variable is null - private boolean nullFromSURL; - private boolean nullStatus; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidReducedBoLChunkDataAttributesException(TSURL fromSURL, - TReturnStatus status) { - - nullFromSURL = fromSURL == null; - nullStatus = status == null; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid BoLChunkData attributes: null-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("."); - return sb.toString(); - } + private static final long serialVersionUID = -8145580437017768234L; + + // booleans that indicate whether the corresponding variable is null + private boolean nullFromSURL; + private boolean nullStatus; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. + */ + public InvalidReducedBoLChunkDataAttributesException(TSURL fromSURL, TReturnStatus status) { + + nullFromSURL = fromSURL == null; + nullStatus = status == null; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("Invalid BoLChunkData attributes: null-fromSURL="); + sb.append(nullFromSURL); + sb.append("; null-status="); + sb.append(nullStatus); + sb.append("."); + return sb.toString(); + } } diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtGChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtGChunkDataAttributesException.java similarity index 51% rename from src/main/java/it/grid/storm/catalogs/InvalidReducedPtGChunkDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtGChunkDataAttributesException.java index 7a21f0e3f..8dccfff08 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtGChunkDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtGChunkDataAttributesException.java @@ -15,14 +15,14 @@ * the License. 
*/ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TReturnStatus; /** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of ReducedPtGChunkData are invalid, that is if any is _null_. + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * ReducedPtGChunkData are invalid, that is if any is _null_. * * @author EGRID - ICTP Trieste * @date November, 2006 @@ -30,32 +30,30 @@ */ public class InvalidReducedPtGChunkDataAttributesException extends Exception { - private static final long serialVersionUID = -7943458526292568164L; - - // booleans that indicate whether the corresponding variable is null - private boolean nullFromSURL; - private boolean nullStatus; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. - */ - public InvalidReducedPtGChunkDataAttributesException(TSURL fromSURL, - TReturnStatus status) { - - nullFromSURL = fromSURL == null; - nullStatus = status == null; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid PtGChunkData attributes: null-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("."); - return sb.toString(); - } + private static final long serialVersionUID = -7943458526292568164L; + + // booleans that indicate whether the corresponding variable is null + private boolean nullFromSURL; + private boolean nullStatus; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. 
+ */ + public InvalidReducedPtGChunkDataAttributesException(TSURL fromSURL, TReturnStatus status) { + + nullFromSURL = fromSURL == null; + nullStatus = status == null; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("Invalid PtGChunkData attributes: null-fromSURL="); + sb.append(nullFromSURL); + sb.append("; null-status="); + sb.append(nullStatus); + sb.append("."); + return sb.toString(); + } } diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtPChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtPChunkDataAttributesException.java new file mode 100644 index 000000000..0658d49d2 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtPChunkDataAttributesException.java @@ -0,0 +1,70 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TLifeTimeInSeconds; + +/** + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * ReducedPtPChunkData are invalid, that is if any is _null_. 
+ * + * @author EGRID - ICTP Trieste + * @date January, 2007 + * @version 1.0 + */ +public class InvalidReducedPtPChunkDataAttributesException extends Exception { + + private static final long serialVersionUID = 4945626188325362854L; + + // booleans that indicate whether the corresponding variable is null + private boolean nullToSURL; + private boolean nullStatus; + private boolean nullFileStorageType; + private boolean nullFileLifetime; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. + */ + public InvalidReducedPtPChunkDataAttributesException(TSURL toSURL, TReturnStatus status, + TFileStorageType fileStorageType, TLifeTimeInSeconds fileLifetime) { + + nullFileStorageType = fileStorageType == null; + nullToSURL = toSURL == null; + nullStatus = status == null; + nullFileLifetime = fileLifetime == null; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("Invalid PtPChunkData attributes: null-toSURL="); + sb.append(nullToSURL); + sb.append("; null-status="); + sb.append(nullStatus); + sb.append("; null-fileStorageType="); + sb.append(nullFileStorageType); + sb.append("; null-fileLifetime="); + sb.append(nullFileLifetime); + sb.append("."); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidRequestSummaryDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidRequestSummaryDataAttributesException.java new file mode 100644 index 000000000..403e1ebb1 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidRequestSummaryDataAttributesException.java @@ -0,0 +1,65 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TRequestType; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.griduser.GridUserInterface; + +/** + * This class represents an Exception thrown when a RequestSummaryData object is created with any + * invalid attributes: null TRequestType, null TRequestToken, null VomsGridUser. + * + * @author EGRID - ICTP Trieste + * @date March 18th, 2005 + * @version 3.0 + */ +public class InvalidRequestSummaryDataAttributesException extends Exception { + + private static final long serialVersionUID = -7729349713696058669L; + + // booleans true if the corresponding variablesare null or negative + private boolean nullRequestType = true; + private boolean nullRequestToken = true; + private boolean nullVomsGridUser = true; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. 
+ */ + public InvalidRequestSummaryDataAttributesException(TRequestType requestType, + TRequestToken requestToken, GridUserInterface gu) { + + nullRequestType = (requestType == null); + nullRequestToken = (requestToken == null); + nullVomsGridUser = (gu == null); + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("Invalid RequestSummaryData attributes exception: "); + sb.append("nullRequestType="); + sb.append(nullRequestType); + sb.append("; nullRequestToken="); + sb.append(nullRequestToken); + sb.append("; nullVomsGridUser="); + sb.append(nullVomsGridUser); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidSpaceDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidSpaceDataAttributesException.java similarity index 65% rename from src/main/java/it/grid/storm/catalogs/InvalidSpaceDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidSpaceDataAttributesException.java index dd0e2ff90..2b4e8990b 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidSpaceDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidSpaceDataAttributesException.java @@ -15,14 +15,13 @@ * the License. */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.srm.types.TSpaceToken; /** - * This class represents an Exception throws if SpaceResData is not well formed. - * * + * This class represents an Exception throws if SpaceResData is not well formed. 
* * * @author Magnoni Luca * @author Cnaf - INFN Bologna @@ -32,24 +31,24 @@ public class InvalidSpaceDataAttributesException extends Exception { - private static final long serialVersionUID = -5317879266114702669L; + private static final long serialVersionUID = -5317879266114702669L; - private boolean nullAuth = true; - private boolean nullToken = true; + private boolean nullAuth = true; + private boolean nullToken = true; - public InvalidSpaceDataAttributesException(GridUserInterface guser) { + public InvalidSpaceDataAttributesException(GridUserInterface guser) { - nullAuth = (guser == null); - } + nullAuth = (guser == null); + } - public InvalidSpaceDataAttributesException(TSpaceToken token) { + public InvalidSpaceDataAttributesException(TSpaceToken token) { - nullToken = (token == null); - } + nullToken = (token == null); + } - public String toString() { + public String toString() { - return "null-Auth=" + nullAuth + "nullToken=" + nullToken; - } + return "null-Auth=" + nullAuth + "nullToken=" + nullToken; + } } diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidSurlRequestDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidSurlRequestDataAttributesException.java new file mode 100644 index 000000000..582f3c396 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidSurlRequestDataAttributesException.java @@ -0,0 +1,83 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; + +/** + * @author Michele Dibenedetto + * + */ +public class InvalidSurlRequestDataAttributesException extends Exception { + + private static final long serialVersionUID = -8636768167720753989L; + protected boolean nullSURL; + protected boolean nullStatus; + + public InvalidSurlRequestDataAttributesException(TSURL SURL, TReturnStatus status) { + + super(); + init(SURL, status); + } + + public InvalidSurlRequestDataAttributesException(TSURL SURL, TReturnStatus status, + String message) { + + super(message); + init(SURL, status); + } + + public InvalidSurlRequestDataAttributesException(TSURL SURL, TReturnStatus status, + Throwable cause) { + + super(cause); + init(SURL, status); + } + + public InvalidSurlRequestDataAttributesException(TSURL SURL, TReturnStatus status, String message, + Throwable cause) { + + super(message, cause); + init(SURL, status); + } + + private void init(TSURL SURL, TReturnStatus status) { + + nullSURL = SURL == null; + nullStatus = status == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidSurlRequestDataAttributesException [nullSURL="); + builder.append(nullSURL); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/catalogs/MalformedGridUserException.java b/src/main/java/it/grid/storm/persistence/exceptions/MalformedGridUserException.java similarity index 95% rename from src/main/java/it/grid/storm/catalogs/MalformedGridUserException.java rename to src/main/java/it/grid/storm/persistence/exceptions/MalformedGridUserException.java index 
f4b61055e..1f5fc2bb1 100644 --- a/src/main/java/it/grid/storm/catalogs/MalformedGridUserException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/MalformedGridUserException.java @@ -15,7 +15,7 @@ * the License. */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; /** * This class represents an Exception thrown when the RequestSummaryCatalog cannot create a diff --git a/src/main/java/it/grid/storm/persistence/exceptions/PersistenceException.java b/src/main/java/it/grid/storm/persistence/exceptions/PersistenceException.java deleted file mode 100644 index 8c645afba..000000000 --- a/src/main/java/it/grid/storm/persistence/exceptions/PersistenceException.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence.exceptions; - -/** - * This exception is used to mark generic failures in persistence layer - * - */ - -public class PersistenceException extends Exception { - - public PersistenceException() { - - super(); - } - - public PersistenceException(String message) { - - super(message); - } - - public PersistenceException(String message, Throwable cause) { - - super(message, cause); - } - - public PersistenceException(Throwable cause) { - - super(cause); - } - -} diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/BoLChunkDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/BoLChunkDAOMySql.java new file mode 100644 index 000000000..db9edaef8 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/BoLChunkDAOMySql.java @@ -0,0 +1,948 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.srm.types.TRequestType.BRING_ON_LINE; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_RELEASED; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; +import static java.sql.Statement.RETURN_GENERATED_KEYS; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.Collection; +import java.util.Date; +import java.util.Iterator; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.naming.SURL; +import it.grid.storm.persistence.converter.RequestTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.BoLChunkDAO; +import it.grid.storm.persistence.model.BoLChunkDataTO; +import it.grid.storm.persistence.model.ReducedBoLChunkDataTO; +import it.grid.storm.persistence.pool.StormDbConnectionPool; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TStatusCode; + +/** + * DAO class for BoLChunkCatalog. This DAO is specifically designed to connect to a MySQL DB. The + * raw data found in those tables is pre-treated in order to turn it into the Object Model of StoRM. + * See Method comments for further info. BEWARE! DAO Adjusts for extra fields in the DB that are not + * present in the object model. 
+ * + * @author CNAF + * @version 1.0 + * @date Aug 2009 + */ +public class BoLChunkDAOMySql extends AbstractDAO implements BoLChunkDAO { + + private static final Logger log = LoggerFactory.getLogger(BoLChunkDAOMySql.class); + + private static final String SELECT_FROM_REQUEST_QUEUE_WITH_TOKEN = + "SELECT rq.ID FROM request_queue rq WHERE rq.r_token=?"; + + private static final String SELECT_FULL_BOL_REQUEST_WITH_TOKEN_AND_STATUS = + "SELECT sb.statusCode, rq.timeStamp, rq.pinLifetime, rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID " + + "WHERE rq.r_token=? AND sb.statusCode<>?"; + + private static final String SELECT_FULL_BOL_REQUEST_WITH_TOKEN = + "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + "WHERE rq.r_token=?"; + + private static final String INSERT_INTO_REQUEST_QUEUE = + "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp,deferredStartTime) " + + "VALUES (?,?,?,?,?,?,?,?,?)"; + + private static final String INSERT_INTO_REQUEST_TRANSFER_PROTOCOLS = + "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) VALUES (?,?)"; + + private static final String INSERT_INTO_REQUEST_DIR_OPTION = + "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) VALUES (?,?,?)"; + + private static final String INSERT_INTO_REQUEST_BOL = + "INSERT INTO request_BoL (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) " + + "VALUES (?,?,?,?,?)"; + + 
private static final String UPDATE_REQUEST_BOL_WHERE_ID = + "UPDATE request_BoL SET normalized_sourceSURL_StFN=?, sourceSURL_uniqueID=? " + "WHERE ID=?"; + + private static final String INSERT_INTO_STATUS_BOL = + "INSERT INTO status_BoL (request_BoLID,statusCode,explanation) VALUES (?,?,?)"; + + private static final String UPDATE_REQUEST_QUEUE_WHERE_ID = + "UPDATE request_queue rq JOIN (status_BoL sb, request_BoL rb) ON (rq.ID=rb.request_queueID AND sb.request_BoLID=rb.ID) " + + "SET sb.fileSize=?, sb.statusCode=?, sb.explanation=?, rq.pinLifetime=?, rb.normalized_sourceSURL_StFN=?, rb.sourceSURL_uniqueID=? " + + "WHERE rb.ID=?"; + + private static final String SELECT_REQUEST_PROTOCOLS_WHERE_TOKEN = "SELECT tp.config_ProtocolsID " + + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " + + "WHERE rq.r_token=?"; + + private static final String UPDATE_STATUS_WHERE_ID = + "UPDATE status_BoL SET statusCode=?, explanation=? WHERE request_BoLID=?"; + + private static final String UPDATE_STATUS_FOR_EXPIRED_PIN_REQUESTS_WITH_STATUS = + "UPDATE status_BoL sb " + + "JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " + + "SET sb.statusCode=? " + + "WHERE sb.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; + + private static BoLChunkDAOMySql instance; + + public static synchronized BoLChunkDAO getInstance() { + if (instance == null) { + instance = new BoLChunkDAOMySql(); + } + return instance; + } + + private final StatusCodeConverter statusCodeConverter; + private final RequestTypeConverter requestTypeConverter; + + private BoLChunkDAOMySql() { + super(StormDbConnectionPool.getInstance()); + statusCodeConverter = StatusCodeConverter.getInstance(); + requestTypeConverter = RequestTypeConverter.getInstance(); + } + + /** + * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets its primaryKey + * changed to the one assigned by the DB. 
The supplied BoLChunkData is used to fill in only the DB + * table where file specific info gets recorded: it does _not_ add a new request! So if spurious + * data is supplied, it will just stay there because of a lack of a parent request! + */ + public synchronized void addChild(BoLChunkDataTO to) { + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getManagedConnection(); + + /* + * WARNING!!!! We are forced to run a query to get the ID of the request, which should NOT be + * so because the corresponding request object should have been changed with the extra field! + * However, it is not possible at the moment to perform such change because of strict deadline + * and the change could wreak havoc the code. So we are forced to make this query!!! + */ + + ps = con.prepareStatement(SELECT_FROM_REQUEST_QUEUE_WITH_TOKEN); + ps.setString(1, to.getRequestToken()); + log.debug("BoL CHUNK DAO: addChild; {}", ps); + res = ps.executeQuery(); + + /* ID of request in request_process! */ + int requestId = extractID(res); + int id = fillBoLTables(con, to, requestId); + + // end transaction! + con.commit(); + + // update primary key reading the generated key + to.setPrimaryKey(id); + } catch (Exception e) { + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + } + + /** + * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets its primaryKey + * changed to the one assigned by the DB. The client_dn must also be supplied as a String. The + * supplied BoLChunkData is used to fill in all the DB tables where file specific info gets + * recorded: it _adds_ a new request! 
+ */ + public synchronized void addNew(BoLChunkDataTO to, String client_dn) { + + final String DESCRIPTION = "New BoL Request resulting from srmCopy invocation."; + + /* Result set containing the ID of the inserted new request */ + ResultSet rs = null; + PreparedStatement addReqQ = null; + PreparedStatement addReqTP = null; + Connection con = null; + + try { + // begin transaction + + con = getManagedConnection(); + + // add to request_queue... + addReqQ = con.prepareStatement(INSERT_INTO_REQUEST_QUEUE, RETURN_GENERATED_KEYS); + /* request type set to bring online */ + addReqQ.setString(1, requestTypeConverter.toDB(BRING_ON_LINE)); + addReqQ.setString(2, client_dn); + addReqQ.setInt(3, to.getLifeTime()); + addReqQ.setInt(4, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + addReqQ.setString(5, DESCRIPTION); + addReqQ.setString(6, to.getRequestToken()); + addReqQ.setInt(7, 1); // number of requested files set to 1! + addReqQ.setTimestamp(8, new Timestamp(new Date().getTime())); + addReqQ.setInt(9, to.getDeferredStartTime()); + log.trace("BoL CHUNK DAO: addNew; {}", addReqQ); + addReqQ.execute(); + + rs = addReqQ.getGeneratedKeys(); + int id_new = extractID(rs); + + addReqTP = con.prepareStatement(INSERT_INTO_REQUEST_TRANSFER_PROTOCOLS); + for (Iterator i = to.getProtocolList().iterator(); i.hasNext();) { + addReqTP.setInt(1, id_new); + addReqTP.setString(2, i.next()); + log.trace("BoL CHUNK DAO: addNew; {}", addReqTP); + addReqTP.execute(); + } + + // addChild... + int id_s = fillBoLTables(con, to, id_new); + + // end transaction! + con.commit(); + + // update primary key reading the generated key + to.setPrimaryKey(id_s); + } catch (Exception e) { + log.error("BoL CHUNK DAO: unable to complete addNew! 
BoLChunkDataTO: {}; " + + "exception received: {}", to, e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rs); + closeStatement(addReqQ); + closeStatement(addReqTP); + closeConnection(con); + } + } + + /** + * To be used inside a transaction + * + * @param to + * @param requestQueueID + * @return + * @throws SQLException + * @throws Exception + */ + private synchronized int fillBoLTables(Connection con, BoLChunkDataTO to, int requestQueueID) + throws SQLException, Exception { + + /* Result set containing the ID of the inserted */ + ResultSet rs_do = null; + /* Result set containing the ID of the inserted */ + ResultSet rs_b = null; + /* Result set containing the ID of the inserted */ + ResultSet rs_s = null; + /* insert TDirOption for request */ + PreparedStatement addDirOption = null; + /* insert request_Bol for request */ + PreparedStatement addBoL = null; + PreparedStatement addChild = null; + + try { + // first fill in TDirOption + addDirOption = con.prepareStatement(INSERT_INTO_REQUEST_DIR_OPTION, RETURN_GENERATED_KEYS); + addDirOption.setBoolean(1, to.getDirOption()); + addDirOption.setBoolean(2, to.getAllLevelRecursive()); + addDirOption.setInt(3, to.getNumLevel()); + log.trace("BoL CHUNK DAO: addNew; {}", addDirOption); + addDirOption.execute(); + + rs_do = addDirOption.getGeneratedKeys(); + int id_do = extractID(rs_do); + + // second fill in request_BoL... sourceSURL and TDirOption! + addBoL = con.prepareStatement(INSERT_INTO_REQUEST_BOL, RETURN_GENERATED_KEYS); + addBoL.setInt(1, id_do); + addBoL.setInt(2, requestQueueID); + addBoL.setString(3, to.getFromSURL()); + addBoL.setString(4, to.normalizedStFN()); + addBoL.setInt(5, to.sulrUniqueID()); + log.trace("BoL CHUNK DAO: addNew; {}", addBoL); + addBoL.execute(); + + rs_b = addBoL.getGeneratedKeys(); + int id_g = extractID(rs_b); + + // third fill in status_BoL... 
+ addChild = con.prepareStatement(INSERT_INTO_STATUS_BOL, RETURN_GENERATED_KEYS); + addChild.setInt(1, id_g); + addChild.setInt(2, to.getStatus()); + addChild.setString(3, to.getErrString()); + log.trace("BoL CHUNK DAO: addNew; " + addChild); + addChild.execute(); + + return id_g; + } finally { + closeResultSet(rs_do); + closeResultSet(rs_b); + closeResultSet(rs_s); + closeStatement(addDirOption); + closeStatement(addBoL); + closeStatement(addChild); + } + } + + /** + * Method used to save the changes made to a retrieved BoLChunkDataTO, back into the MySQL DB. + * Only the fileSize, statusCode and explanation, of status_BoL table are written to the DB. + * Likewise for the request pinLifetime. In case of any error, an error message gets logged but no + * exception is thrown. + */ + public synchronized void update(BoLChunkDataTO to) { + + Connection con = null; + PreparedStatement updateFileReq = null; + try { + con = getConnection(); + // ready updateFileReq... + updateFileReq = con.prepareStatement(UPDATE_REQUEST_QUEUE_WHERE_ID); + updateFileReq.setLong(1, to.getFileSize()); + updateFileReq.setInt(2, to.getStatus()); + updateFileReq.setString(3, to.getErrString()); + updateFileReq.setInt(4, to.getLifeTime()); + updateFileReq.setString(5, to.normalizedStFN()); + updateFileReq.setInt(6, to.sulrUniqueID()); + updateFileReq.setLong(7, to.getPrimaryKey()); + // execute update + log.trace("BoL CHUNK DAO: update method; {}", updateFileReq); + updateFileReq.executeUpdate(); + } catch (SQLException e) { + log.error("BoL CHUNK DAO: Unable to complete update! 
{}", e.getMessage(), e); + } finally { + closeStatement(updateFileReq); + closeConnection(con); + } + } + + /** + * Updates the request_Bol represented by the received ReducedBoLChunkDataTO by setting its + * normalized_sourceSURL_StFN and sourceSURL_uniqueID + * + * @param chunkTO + */ + public synchronized void updateIncomplete(ReducedBoLChunkDataTO chunkTO) { + + Connection con = null; + PreparedStatement ps = null; + try { + con = getConnection(); + ps = con.prepareStatement(UPDATE_REQUEST_BOL_WHERE_ID); + ps.setString(1, chunkTO.normalizedStFN()); + ps.setInt(2, chunkTO.surlUniqueID()); + ps.setLong(3, chunkTO.primaryKey()); + log.trace("BoL CHUNK DAO - update incomplete: {}", ps); + ps.executeUpdate(); + } catch (SQLException e) { + log.error("BoL CHUNK DAO: Unable to complete update incomplete! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * Method that queries the MySQL DB to find all entries matching the supplied TRequestToken. The + * Collection contains the corresponding BoLChunkDataTO objects. An initial simple query + * establishes the list of protocols associated with the request. A second complex query + * establishes all chunks associated with the request, by properly joining request_queue, + * request_BoL, status_BoL and request_DirOption. The considered fields are: (1) From status_BoL: + * the ID field which becomes the TOs primary key, and statusCode. (2) From request_BoL: + * sourceSURL (3) From request_queue: pinLifetime (4) From request_DirOption: isSourceADirectory, + * alLevelRecursive, numOfLevels In case of any error, a log gets written and an empty collection + * is returned. No exception is thrown. NOTE! Chunks in SRM_ABORTED status are NOT returned! 
+ */ + public synchronized Collection find(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement pps = null; + PreparedStatement rps = null; + ResultSet prs = null; + ResultSet rrs = null; + + try { + + con = getConnection(); + pps = con.prepareStatement(SELECT_REQUEST_PROTOCOLS_WHERE_TOKEN); + + List protocols = Lists.newArrayList(); + pps.setString(1, requestToken.getValue()); + log.trace("BoL CHUNK DAO: find method; {}", pps); + prs = pps.executeQuery(); + + while (prs.next()) { + protocols.add(prs.getString("tp.config_ProtocolsID")); + } + + rps = con.prepareStatement(SELECT_FULL_BOL_REQUEST_WITH_TOKEN_AND_STATUS); + List results = Lists.newArrayList(); + rps.setString(1, requestToken.getValue()); + rps.setInt(2, statusCodeConverter.toDB(SRM_ABORTED)); + log.trace("BoL CHUNK DAO: find method; {}", rps); + rrs = rps.executeQuery(); + + while (rrs.next()) { + + BoLChunkDataTO chunkDataTO = new BoLChunkDataTO(); + chunkDataTO.setStatus(rrs.getInt("sb.statusCode")); + chunkDataTO.setLifeTime(rrs.getInt("rq.pinLifetime")); + chunkDataTO.setDeferredStartTime(rrs.getInt("rq.deferredStartTime")); + chunkDataTO.setRequestToken(requestToken.getValue()); + chunkDataTO.setTimeStamp(rrs.getTimestamp("rq.timeStamp")); + chunkDataTO.setPrimaryKey(rrs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rrs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rrs.getString("rb.normalized_sourceSURL_StFN")); + + int uniqueID = rrs.getInt("rb.sourceSURL_uniqueID"); + if (!rrs.wasNull()) { + chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); + } + + chunkDataTO.setDirOption(rrs.getBoolean("d.isSourceADirectory")); + chunkDataTO.setAllLevelRecursive(rrs.getBoolean("d.allLevelRecursive")); + chunkDataTO.setNumLevel(rrs.getInt("d.numOfLevels")); + chunkDataTO.setProtocolList(protocols); + results.add(chunkDataTO); + } + return results; + + } catch (SQLException e) { + + log.error("BOL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + return 
Lists.newArrayList(); + + } finally { + closeResultSet(prs); + closeResultSet(rrs); + closeStatement(pps); + closeStatement(rps); + closeConnection(con); + } + } + + /** + * Method that returns a Collection of ReducedBoLChunkDataTO associated to the given TRequestToken + * expressed as String. + */ + public synchronized Collection findReduced(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement ps = null; + ResultSet rs = null; + + List results = Lists.newArrayList(); + + try { + + con = getConnection(); + + ps = con.prepareStatement(SELECT_FULL_BOL_REQUEST_WITH_TOKEN); + ps.setString(1, requestToken.getValue()); + log.trace("BoL CHUNK DAO! findReduced with request token; {}", ps); + rs = ps.executeQuery(); + + ReducedBoLChunkDataTO chunkDataTO = null; + while (rs.next()) { + chunkDataTO = new ReducedBoLChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sb.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rb.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + return results; + + } catch (SQLException e) { + + log.error("BOL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + return results; + + } finally { + closeResultSet(rs); + closeStatement(ps); + closeConnection(con); + } + } + + /** + * Method that returns a Collection of ReducedBoLChunkDataTO associated to the given griduser, and + * whose SURLs are contained in the supplied array of Strings. 
+ */ + public synchronized Collection findReduced(TRequestToken requestToken, + int[] surlUniqueIDs, String[] surls) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + + con = getConnection(); + + /* + * NOTE: we search also on the fromSurl because otherwise we lost all request_Bol that have + * not the uniqueID set because are not yet been used by anybody + */ + // get reduced chunks + String str = + "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + + "WHERE rq.r_token=? AND ( rb.sourceSURL_uniqueID IN " + + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rb.sourceSURL IN " + + makeSurlString(surls) + " ) "; + find = con.prepareStatement(str); + find.setString(1, requestToken.getValue()); + + log.trace("BoL CHUNK DAO! findReduced with griduser+surlarray; {}", find); + rs = find.executeQuery(); + + ReducedBoLChunkDataTO chunkDataTO = null; + while (rs.next()) { + chunkDataTO = new ReducedBoLChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sb.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rb.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + } catch (SQLException e) { + log.error("BoL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + /** + * Method that returns a Collection of ReducedBoLChunkDataTO associated to the given griduser, and + * whose SURLs are contained in the supplied array of Strings. 
+ */ + public synchronized Collection findReduced(String griduser, + int[] surlUniqueIDs, String[] surls) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + + Collection results = Lists.newArrayList(); + + try { + + con = getConnection(); + + /* + * NOTE: we search also on the fromSurl because otherwise we lost all request_Bol that have + * not the uniqueID set because are not yet been used by anybody + */ + // get reduced chunks + String str = + "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + + "WHERE rq.client_dn=? AND ( rb.sourceSURL_uniqueID IN " + + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rb.sourceSURL IN " + + makeSurlString(surls) + " ) "; + find = con.prepareStatement(str); + find.setString(1, griduser); + log.trace("BoL CHUNK DAO! findReduced with griduser+surlarray; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + ReducedBoLChunkDataTO chunkDataTO = new ReducedBoLChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sb.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rb.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + } catch (SQLException e) { + log.error("BoL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + public synchronized int updateStatus(BoLChunkDataTO to, TStatusCode status, String explanation) { + + Connection con = null; + PreparedStatement ps = null; + int result = 0; + + try { + con = getConnection(); + ps = 
con.prepareStatement(UPDATE_STATUS_WHERE_ID); + ps.setInt(1, statusCodeConverter.toDB(status)); + ps.setString(2, explanation); + ps.setLong(3, to.getPrimaryKey()); + log.trace("BoL CHUNK DAO: update status {}", ps); + result = ps.executeUpdate(); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + return result; + } + + /** + * Method that updates to SRM_RELEASED all the requests in SRM_SUCCESS status which have the + * requested pin lifetime expired. This is necessary when the client forgets to invoke + * srmReleaseFiles(). + * + * @return List of updated SURLs. + */ + public synchronized int releaseExpiredAndSuccessfulRequests() { + + Connection con = null; + PreparedStatement ps = null; + + int count = 0; + + try { + + // start transaction + con = getConnection(); + + /* Update status of all successful expired requests to SRM_RELEASED */ + ps = con.prepareStatement(UPDATE_STATUS_FOR_EXPIRED_PIN_REQUESTS_WITH_STATUS); + ps.setInt(1, statusCodeConverter.toDB(SRM_RELEASED)); + ps.setInt(2, statusCodeConverter.toDB(SRM_SUCCESS)); + log.trace("BoL CHUNK DAO - transitExpiredSRM_SUCCESS method: {}", ps); + + count = ps.executeUpdate(); + + if (count == 0) { + log.trace( + "BoLChunkDAO! No chunk of BoL request was transited from SRM_SUCCESS to SRM_RELEASED."); + } else { + log.info( + "BoLChunkDAO! {} chunks of BoL requests were transited from SRM_SUCCESS to SRM_RELEASED.", + count); + } + + } catch (SQLException e) { + + log.error("BoLChunkDAO! 
SQLException.", e.getMessage(), e); + e.printStackTrace(); + + } finally { + + closeStatement(ps); + closeConnection(con); + } + return count; + } + + public synchronized void updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || explanation == null) { + throw new IllegalArgumentException("Unable to perform the updateStatusOnMatchingStatus, " + + "invalid arguments: requestToken=" + requestToken + " explanation=" + explanation); + } + doUpdateStatusOnMatchingStatus(requestToken, null, null, expectedStatusCode, newStatusCode, + explanation, true, false, true); + } + + private synchronized int doUpdateStatusOnMatchingStatus(TRequestToken requestToken, + int[] surlUniqueIDs, String[] surls, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation, boolean withRequestToken, boolean withSurls, + boolean withExplanation) throws IllegalArgumentException { + + if ((withRequestToken && requestToken == null) || (withExplanation && explanation == null) + || (withSurls && (surlUniqueIDs == null || surls == null))) { + throw new IllegalArgumentException("Unable to perform the doUpdateStatusOnMatchingStatus, " + + "invalid arguments: withRequestToken=" + withRequestToken + " requestToken=" + + requestToken + " withSurls=" + withSurls + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + + surls + " withExplaination=" + withExplanation + " explanation=" + explanation); + } + String str = "UPDATE status_BoL sb JOIN (request_BoL rb, request_queue rq) " + + "ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " + "SET sb.statusCode=? "; + if (withExplanation) { + str += " , " + buildExplanationSet(explanation); + } + str += " WHERE sb.statusCode=? 
"; + if (withRequestToken) { + str += " AND " + buildTokenWhereClause(requestToken); + } + if (withSurls) { + str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); + } + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(newStatusCode)); + stmt.setInt(2, statusCodeConverter.toDB(expectedStatusCode)); + log.trace("BOL CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt); + count = stmt.executeUpdate(); + + } catch (SQLException e) { + log.error("BOL CHUNK DAO! Unable to updated from {} to {}!", expectedStatusCode, + newStatusCode, e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + + return count; + } + + public Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn) + throws IllegalArgumentException { + + if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 || surlsArray == null + || surlsArray.length == 0 || dn == null) { + throw new IllegalArgumentException( + "Unable to perform the find, " + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + + " surlsArray=" + surlsArray + " dn=" + dn); + } + return find(surlsUniqueIDs, surlsArray, dn, true); + } + + public Collection find(int[] surlsUniqueIDs, String[] surlsArray) + throws IllegalArgumentException { + + if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 || surlsArray == null + || surlsArray.length == 0) { + throw new IllegalArgumentException("Unable to perform the find, " + + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + " surlsArray=" + surlsArray); + } + return find(surlsUniqueIDs, surlsArray, null, false); + } + + private synchronized Collection find(int[] surlsUniqueIDs, String[] surlsArray, + String dn, boolean withDn) throws IllegalArgumentException { + + if ((withDn && dn == null) || surlsUniqueIDs == null || surlsUniqueIDs.length == 0 + || surlsArray == null || surlsArray.length 
== 0) { + throw new IllegalArgumentException( + "Unable to perform the find, " + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); + } + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + + con = getConnection(); + + // get chunks of the request + String str = "SELECT rq.ID, rq.r_token, sb.statusCode, rq.timeStamp, rq.pinLifetime, " + + "rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, " + + "rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID " + + "WHERE ( rb.sourceSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rb.sourceSURL IN " + makeSurlString(surlsArray) + " )"; + + if (withDn) { + str += " AND rq.client_dn=\'" + dn + "\'"; + } + find = con.prepareStatement(str); + + log.trace("BOL CHUNK DAO - find method: {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + + BoLChunkDataTO chunkDataTO = new BoLChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sb.statusCode")); + chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); + chunkDataTO.setDeferredStartTime(rs.getInt("rq.deferredStartTime")); + chunkDataTO.setRequestToken(rs.getString("rq.r_token")); + chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); + chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rb.normalized_sourceSURL_StFN")); + + int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); + } + + chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); + 
chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); + chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); + + results.add(chunkDataTO); + } + + } catch (SQLException e) { + + log.error("BOL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + + } finally { + + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + + } + + return results; + } + + /** + * Private method that returns the generated ID: it throws an exception in case of any problem! + */ + private int extractID(ResultSet rs) throws Exception { + + if (rs == null) { + throw new Exception("BoL CHUNK DAO! Null ResultSet!"); + } + if (rs.next()) { + return rs.getInt(1); + } + String msg = + "BoL CHUNK DAO! It was not possible to establish the assigned autoincrement primary key!"; + log.error(msg); + throw new Exception(msg); + } + + /** + * Method that returns a String containing all Surl's IDs. + */ + private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { + + StringBuilder sb = new StringBuilder("("); + for (int i = 0; i < surlUniqueIDs.length; i++) { + if (i > 0) { + sb.append(","); + } + sb.append(surlUniqueIDs[i]); + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all SURLs. 
+ */ + private String makeSurlString(String[] surls) { + + StringBuilder sb = new StringBuilder("("); + int n = surls.length; + + for (int i = 0; i < n; i++) { + + SURL requestedSURL; + + try { + requestedSURL = SURL.makeSURLfromString(surls[i]); + } catch (NamespaceException e) { + log.error(e.getMessage()); + log.debug("Skip '{}' during query creation", surls[i]); + continue; + } + + sb.append("'"); + sb.append(requestedSURL.getNormalFormAsString()); + sb.append("','"); + sb.append(requestedSURL.getQueryFormAsString()); + sb.append("'"); + + if (i < (n - 1)) { + sb.append(","); + } + } + + sb.append(")"); + return sb.toString(); + } + + private String buildExplanationSet(String explanation) { + + return " sb.explanation='" + explanation + "' "; + } + + private String buildTokenWhereClause(TRequestToken requestToken) { + + return " rq.r_token='" + requestToken.toString() + "' "; + } + + private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { + + return " ( rb.sourceSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rb.sourceSURL IN " + makeSurlString(surls) + " ) "; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/PtGChunkDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/PtGChunkDAOMySql.java new file mode 100644 index 000000000..d254e5a35 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/PtGChunkDAOMySql.java @@ -0,0 +1,1263 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_GET; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_PINNED; +import static it.grid.storm.srm.types.TStatusCode.SRM_RELEASED; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; +import static java.sql.Statement.RETURN_GENERATED_KEYS; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.Collection; +import java.util.Date; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +import it.grid.storm.ea.StormEA; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.StoRI; +import it.grid.storm.namespace.naming.SURL; +import it.grid.storm.persistence.converter.RequestTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.PtGChunkDAO; +import it.grid.storm.persistence.model.PtGChunkDataTO; +import it.grid.storm.persistence.model.ReducedPtGChunkDataTO; +import it.grid.storm.persistence.pool.StormDbConnectionPool; +import it.grid.storm.srm.types.InvalidTSURLAttributesException; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TSURL; +import 
it.grid.storm.srm.types.TStatusCode; + +/** + * DAO class for PtGChunkCatalog. This DAO is specifically designed to connect to a MySQL DB. The + * raw data found in those tables is pre-treated in order to turn it into the Object Model of StoRM. + * See Method comments for further info. + * + * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the object model. + * + * @author EGRID ICTP + * @version 3.0 + * @date June 2005 + */ +public class PtGChunkDAOMySql extends AbstractDAO implements PtGChunkDAO { + + private static final Logger log = LoggerFactory.getLogger(PtGChunkDAOMySql.class); + + private static final String SELECT_REQUEST_WHERE_TOKEN = + "SELECT * FROM request_queue WHERE r_token=?"; + + private static final String INSERT_REQUEST = + "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp) " + + "VALUES (?,?,?,?,?,?,?,?)"; + + private static final String INSERT_REQUEST_TRASNFER_PROTOCOL = + "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) " + + "VALUES (?,?)"; + + private static final String INSERT_REQUEST_DIR_OPTION = + "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) " + + "VALUES (?,?,?)"; + + private static final String INSERT_REQUEST_GET = + "INSERT INTO request_Get (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) " + + "VALUES (?,?,?,?,?)"; + + private static final String INSERT_STATUS_GET = + "INSERT INTO status_Get (request_GetID,statusCode,explanation) VALUES (?,?,?)"; + + private static final String UPDATE_REQUEST_GET_STATUS_WHERE_ID = + "UPDATE request_queue rq JOIN (status_Get sg, request_Get rg) ON (rq.ID=rg.request_queueID AND sg.request_GetID=rg.ID) " + + "SET sg.fileSize=?, sg.transferURL=?, sg.statusCode=?, sg.explanation=?, rq.pinLifetime=?, rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? 
" + + "WHERE rg.ID=?"; + + private static final String UPDATE_REQUEST_GET_WHERE_ID = + "UPDATE request_Get rg SET rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? " + + "WHERE rg.ID=?"; + + private static final String SELECT_STATUS_GET_WHERE_GET_ID = + "SELECT statusCode, transferURL FROM status_Get WHERE request_GetID=?"; + + private static final String SELECT_REQUEST_GET_PROTOCOLS_WHERE_TOKEN = + "SELECT tp.config_ProtocolsID " + + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " + + "WHERE rq.r_token=?"; + + private static final String SELECT_REQUEST_GET_WHERE_TOKEN_AND_STATUS = + "SELECT sg.statusCode, rq.pinLifetime, rg.ID, rq.timeStamp, rq.client_dn, rq.proxy, rg.sourceSURL, " + + "rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID, d.isSourceADirectory, " + + "d.allLevelRecursive, d.numOfLevels " + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "LEFT JOIN request_DirOption d ON rg.request_DirOptionID=d.ID " + + "WHERE rq.r_token=? AND sg.statusCode<>?"; + + private static final String SELECT_REQUEST_GET_WHERE_TOKEN = + "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + "WHERE rq.r_token=?"; + + private static final String UPDATE_STATUS_GET_WHERE_REQUEST_GET_ID_IS = + "UPDATE status_Get SET statusCode=?, explanation=? WHERE request_GetID=?"; + + private static final String COUNT_REQUEST_ON_SURL_WITH_STATUS = + "SELECT COUNT(rg.ID) FROM status_Get sg JOIN request_Get rg " + + "ON (sg.request_GetID=rg.ID) WHERE rg.sourceSURL_uniqueID=? 
AND sg.statusCode=?"; + + private static final String SELECT_EXPIRED_REQUESTS = + "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID " + + "FROM request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "WHERE sg.statusCode=?" + + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime"; + + private static final String UPDATE_STATUS_OF_EXPIRED_REQUESTS = + "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "SET sg.statusCode=? " + + "WHERE sg.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; + + private static final String SELECT_PTG_PINNED_SURLS = + "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID FROM " + + "request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "WHERE sg.statusCode=?" + + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; + + private static final String SELECT_BOL_PINNED_SURLS = + "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM " + + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " + + "WHERE sb.statusCode=?" 
+ + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; + + private static PtGChunkDAOMySql instance; + + public static synchronized PtGChunkDAO getInstance() { + if (instance == null) { + instance = new PtGChunkDAOMySql(); + } + return instance; + } + + private final RequestTypeConverter requestTypeConverter; + private final StatusCodeConverter statusCodeConverter; + + private PtGChunkDAOMySql() { + + super(StormDbConnectionPool.getInstance()); + requestTypeConverter = RequestTypeConverter.getInstance(); + statusCodeConverter = StatusCodeConverter.getInstance(); + } + + /** + * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets its primaryKey + * changed to the one assigned by the DB. + * + * The supplied PtGChunkData is used to fill in only the DB table where file specific info gets + * recorded: it does _not_ add a new request! So if spurious data is supplied, it will just stay + * there because of a lack of a parent request! + */ + public synchronized void addChild(PtGChunkDataTO to) { + + Connection con = null; + PreparedStatement id = null; + ResultSet rsid = null; + + try { + + // WARNING!!!! We are forced to run a query to get the ID of the request, + // which should NOT be so + // because the corresponding request object should have been changed with + // the extra field! However, it is not possible + // at the moment to perform such chage because of strict deadline and the + // change could wreak havoc + // the code. So we are forced to make this query!!! 
+ + con = getManagedConnection(); + id = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN); + id.setString(1, to.requestToken()); + log.debug("PTG CHUNK DAO: addChild; {}", id); + rsid = id.executeQuery(); + + if (rsid.next()) { + + int requestId = rsid.getInt("ID"); + int id_s = fillPtGTables(con, to, requestId); + con.commit(); + to.setPrimaryKey(id_s); + + } else { + log.error("Unable to find queued request for token {}", to.requestToken()); + con.rollback(); + } + + } catch (SQLException e) { + log.error("PTG CHUNK DAO: unable to complete addChild! " + "PtGChunkDataTO: {}; error: {}", + to, e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rsid); + closeStatement(id); + closeConnection(con); + } + } + + /** + * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets its primaryKey + * changed to the one assigned by the DB. The client_dn must also be supplied as a String. + * + * The supplied PtGChunkData is used to fill in all the DB tables where file specific info gets + * recorded: it _adds_ a new request! + */ + public synchronized void addNew(PtGChunkDataTO to, String clientDn) { + + Connection con = null; + ResultSet rsNew = null; + PreparedStatement addNew = null; + PreparedStatement addProtocols = null; + + try { + + con = getManagedConnection(); + + addNew = con.prepareStatement(INSERT_REQUEST, RETURN_GENERATED_KEYS); + addNew.setString(1, requestTypeConverter.toDB(PREPARE_TO_GET)); + addNew.setString(2, clientDn); + addNew.setInt(3, to.lifeTime()); + addNew.setInt(4, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + addNew.setString(5, "New PtG Request resulting from srmCopy invocation."); + addNew.setString(6, to.requestToken()); + addNew.setInt(7, 1); // number of requested files set to 1! 
+ addNew.setTimestamp(8, new Timestamp(new Date().getTime())); + log.trace("PTG CHUNK DAO: addNew; {}", addNew); + addNew.execute(); + + rsNew = addNew.getGeneratedKeys(); + + if (!rsNew.next()) { + log.error("Unable to insert new request"); + con.rollback(); + return; + } + int idNew = rsNew.getInt(1); + + // add protocols... + addProtocols = con.prepareStatement(INSERT_REQUEST_TRASNFER_PROTOCOL); + for (Iterator i = to.protocolList().iterator(); i.hasNext();) { + addProtocols.setInt(1, idNew); + addProtocols.setString(2, i.next()); + log.trace("PTG CHUNK DAO: addNew; {}", addProtocols); + addProtocols.execute(); + } + + // addChild... + int id = fillPtGTables(con, to, idNew); + + // end transaction! + con.commit(); + + // update primary key reading the generated key + to.setPrimaryKey(id); + + } catch (SQLException e) { + log.error("PTG CHUNK DAO: Rolling back! Unable to complete addNew! " + + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rsNew); + closeStatement(addNew); + closeStatement(addProtocols); + closeConnection(con); + } + } + + /** + * To be used inside a transaction + * + * @param to + * @param requestQueueID + * @return + * @throws SQLException + * @throws Exception + */ + private synchronized int fillPtGTables(Connection con, PtGChunkDataTO to, int requestQueueID) + throws SQLException { + + ResultSet rsDo = null; + ResultSet rsG = null; + ResultSet rsS = null; + PreparedStatement addDirOption = null; + PreparedStatement addGet = null; + PreparedStatement addChild = null; + + try { + + // first fill in TDirOption + addDirOption = con.prepareStatement(INSERT_REQUEST_DIR_OPTION, RETURN_GENERATED_KEYS); + addDirOption.setBoolean(1, to.dirOption()); + addDirOption.setBoolean(2, to.allLevelRecursive()); + addDirOption.setInt(3, to.numLevel()); + log.trace("PTG CHUNK DAO: addNew; {}", addDirOption); + addDirOption.execute(); + + 
rsDo = addDirOption.getGeneratedKeys(); + + if (!rsDo.next()) { + throw new SQLException("Unable to get dir_option id"); + } + int idDo = rsDo.getInt(1); + + // second fill in request_Get... sourceSURL and TDirOption! + addGet = con.prepareStatement(INSERT_REQUEST_GET, RETURN_GENERATED_KEYS); + addGet.setInt(1, idDo); + addGet.setInt(2, requestQueueID); + addGet.setString(3, to.fromSURL()); + addGet.setString(4, to.normalizedStFN()); + addGet.setInt(5, to.surlUniqueID()); + log.trace("PTG CHUNK DAO: addNew; {}", addGet); + addGet.execute(); + + rsG = addGet.getGeneratedKeys(); + if (!rsG.next()) { + throw new SQLException("Unable to get request_get id"); + } + int idG = rsG.getInt(1); + + // third fill in status_Get... + addChild = con.prepareStatement(INSERT_STATUS_GET, RETURN_GENERATED_KEYS); + addChild.setInt(1, idG); + addChild.setInt(2, to.status()); + addChild.setString(3, to.errString()); + log.trace("PTG CHUNK DAO: addNew; {}", addChild); + addChild.execute(); + + return idG; + + } finally { + closeResultSet(rsDo); + closeResultSet(rsG); + closeResultSet(rsS); + closeStatement(addDirOption); + closeStatement(addGet); + closeStatement(addChild); + } + } + + /** + * Method used to save the changes made to a retrieved PtGChunkDataTO, back into the MySQL DB. + * + * Only the fileSize, transferURL, statusCode and explanation, of status_Get table are written to + * the DB. Likewise for the request pinLifetime. + * + * In case of any error, an error message gets logged but no exception is thrown. 
+ */ + public synchronized void update(PtGChunkDataTO to) { + + Connection con = null; + PreparedStatement updateFileReq = null; + + try { + + con = getConnection(); + updateFileReq = con.prepareStatement(UPDATE_REQUEST_GET_STATUS_WHERE_ID); + updateFileReq.setLong(1, to.fileSize()); + updateFileReq.setString(2, to.turl()); + updateFileReq.setInt(3, to.status()); + updateFileReq.setString(4, to.errString()); + updateFileReq.setInt(5, to.lifeTime()); + updateFileReq.setString(6, to.normalizedStFN()); + updateFileReq.setInt(7, to.surlUniqueID()); + updateFileReq.setLong(8, to.primaryKey()); + // execute update + log.trace("PTG CHUNK DAO: update method; {}", updateFileReq); + updateFileReq.executeUpdate(); + + } catch (SQLException e) { + log.error("PtG CHUNK DAO: Unable to complete update! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(updateFileReq); + closeConnection(con); + } + } + + /** + * Updates the request_Get represented by the received ReducedPtGChunkDataTO by setting its + * normalized_sourceSURL_StFN and sourceSURL_uniqueID + * + * @param chunkTO + */ + public synchronized void updateIncomplete(ReducedPtGChunkDataTO chunkTO) { + + Connection con = null; + PreparedStatement update = null; + + try { + con = getConnection(); + update = con.prepareStatement(UPDATE_REQUEST_GET_WHERE_ID); + update.setString(1, chunkTO.normalizedStFN()); + update.setInt(2, chunkTO.surlUniqueID()); + update.setLong(3, chunkTO.primaryKey()); + log.trace("PtG CHUNK DAO - update incomplete: {}", update); + update.executeUpdate(); + } catch (SQLException e) { + log.error("PtG CHUNK DAO: Unable to complete update incomplete! {}", e.getMessage(), e); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + /** + * TODO WARNING! THIS IS A WORK IN PROGRESS!!! + * + * Method used to refresh the PtGChunkDataTO information from the MySQL DB. + * + * In this first version, only the statusCode and the TURL are reloaded from the DB. 
TODO The next + * version must contains all the information related to the Chunk! + * + * In case of any error, an error messagge gets logged but no exception is thrown. + */ + + public synchronized PtGChunkDataTO refresh(long primaryKey) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + PtGChunkDataTO chunkDataTO = null; + + try { + + con = getConnection(); + find = con.prepareStatement(SELECT_STATUS_GET_WHERE_GET_ID); + find.setLong(1, primaryKey); + log.trace("PTG CHUNK DAO: refresh status method; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + chunkDataTO = new PtGChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sg.statusCode")); + chunkDataTO.setTurl(rs.getString("sg.transferURL")); + } + return chunkDataTO; + + } catch (SQLException e) { + + log.error("PTG CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + return null; + + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + } + + /** + * Method that queries the MySQL DB to find all entries matching the supplied TRequestToken. The + * Collection contains the corresponding PtGChunkDataTO objects. + * + * An initial simple query establishes the list of protocols associated with the request. A second + * complex query establishes all chunks associated with the request, by properly joining + * request_queue, request_Get, status_Get and request_DirOption. The considered fields are: + * + * (1) From status_Get: the ID field which becomes the TOs primary key, and statusCode. + * + * (2) From request_Get: sourceSURL + * + * (3) From request_queue: pinLifetime + * + * (4) From request_DirOption: isSourceADirectory, alLevelRecursive, numOfLevels + * + * In case of any error, a log gets written and an empty collection is returned. No exception is + * thrown. + * + * NOTE! Chunks in SRM_ABORTED status are NOT returned! 
+ */ + public synchronized Collection find(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement findProtocols = null; + PreparedStatement findRequest = null; + ResultSet rsProtocols = null; + ResultSet rsRequest = null; + Collection results = Lists.newArrayList(); + + try { + + con = getManagedConnection(); + findProtocols = con.prepareStatement(SELECT_REQUEST_GET_PROTOCOLS_WHERE_TOKEN); + + List protocols = Lists.newArrayList(); + findProtocols.setString(1, requestToken.getValue()); + log.trace("PTG CHUNK DAO: find method; {}", findProtocols); + rsProtocols = findProtocols.executeQuery(); + while (rsProtocols.next()) { + protocols.add(rsProtocols.getString("tp.config_ProtocolsID")); + } + + findRequest = con.prepareStatement(SELECT_REQUEST_GET_WHERE_TOKEN_AND_STATUS); + findRequest.setString(1, requestToken.getValue()); + findRequest.setInt(2, statusCodeConverter.toDB(SRM_ABORTED)); + log.trace("PTG CHUNK DAO: find method; {}", findRequest); + rsRequest = findRequest.executeQuery(); + + PtGChunkDataTO chunkDataTO; + while (rsRequest.next()) { + chunkDataTO = new PtGChunkDataTO(); + chunkDataTO.setStatus(rsRequest.getInt("sg.statusCode")); + chunkDataTO.setRequestToken(requestToken.getValue()); + chunkDataTO.setPrimaryKey(rsRequest.getLong("rg.ID")); + chunkDataTO.setFromSURL(rsRequest.getString("rg.sourceSURL")); + chunkDataTO.setNormalizedStFN(rsRequest.getString("rg.normalized_sourceSURL_StFN")); + int uniqueID = rsRequest.getInt("rg.sourceSURL_uniqueID"); + if (!rsRequest.wasNull()) { + chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); + } + chunkDataTO.setClientDN(rsRequest.getString("rq.client_dn")); + + /** + * This code is only for the 1.3.18. This is a workaround to get FQANs using the proxy field + * on request_queue. The FE use the proxy field of request_queue to insert a single FQAN + * string containing all FQAN separeted by the "#" char. The proxy is a BLOB, hence it has + * to be properly conveted in string. 
+ */ + java.sql.Blob blob = rsRequest.getBlob("rq.proxy"); + if (!rsRequest.wasNull() && blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + chunkDataTO.setVomsAttributes(new String(bdata)); + } + chunkDataTO.setTimeStamp(rsRequest.getTimestamp("rq.timeStamp")); + chunkDataTO.setLifeTime(rsRequest.getInt("rq.pinLifetime")); + chunkDataTO.setDirOption(rsRequest.getBoolean("d.isSourceADirectory")); + chunkDataTO.setAllLevelRecursive(rsRequest.getBoolean("d.allLevelRecursive")); + chunkDataTO.setNumLevel(rsRequest.getInt("d.numOfLevels")); + chunkDataTO.setProtocolList(protocols); + results.add(chunkDataTO); + } + con.commit(); + } catch (SQLException e) { + log.error("PTG CHUNK DAO: ", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rsProtocols); + closeResultSet(rsRequest); + closeStatement(findProtocols); + closeStatement(findRequest); + closeConnection(con); + } + return results; + } + + /** + * Method that returns a Collection of ReducedPtGChunkDataTO associated to the given TRequestToken + * expressed as String. + */ + public synchronized Collection findReduced(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + + con = getConnection(); + find = con.prepareStatement(SELECT_REQUEST_GET_WHERE_TOKEN); + find.setString(1, requestToken.getValue()); + log.trace("PtG CHUNK DAO! 
findReduced with request token; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + ReducedPtGChunkDataTO reducedChunkDataTO = new ReducedPtGChunkDataTO(); + reducedChunkDataTO.setStatus(rs.getInt("sg.statusCode")); + reducedChunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); + reducedChunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); + reducedChunkDataTO.setNormalizedStFN(rs.getString("rg.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + reducedChunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(reducedChunkDataTO); + } + + } catch (SQLException e) { + log.error("PTG CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + public synchronized Collection findReduced(TRequestToken requestToken, + int[] surlsUniqueIDs, String[] surlsArray) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + + String str = + "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE rq.r_token=? AND ( rg.sourceSURL_uniqueID IN " + + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rg.sourceSURL IN " + + makeSurlString(surlsArray) + " ) "; + + con = getConnection(); + find = con.prepareStatement(str); + find.setString(1, requestToken.getValue()); + log.trace("PtG CHUNK DAO! 
findReduced with griduser+surlarray; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + ReducedPtGChunkDataTO chunkDataTO = new ReducedPtGChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sg.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); + chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rg.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + } catch (SQLException e) { + log.error("PTG CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + /** + * Method that returns a Collection of ReducedPtGChunkDataTO associated to the given griduser, and + * whose SURLs are contained in the supplied array of Strings. + */ + public synchronized Collection findReduced(String griduser, + int[] surlUniqueIDs, String[] surls) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + /* + * NOTE: we search also on the fromSurl because otherwise we lost all request_get that have + * not the uniqueID set because are not yet been used by anybody + */ + con = getConnection(); + // get reduced chunks + String str = + "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE rq.client_dn=? AND ( rg.sourceSURL_uniqueID IN " + + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rg.sourceSURL IN " + + makeSurlString(surls) + " ) "; + find = con.prepareStatement(str); + find.setString(1, griduser); + log.trace("PtG CHUNK DAO! 
findReduced with griduser+surlarray; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + ReducedPtGChunkDataTO chunkDataTO = new ReducedPtGChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sg.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); + chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rg.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + } catch (SQLException e) { + log.error("PTG CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + /** + * Method used in extraordinary situations to signal that data retrieved from the DB was malformed + * and could not be translated into the StoRM object model. + * + * This method attempts to change the status of the request to SRM_FAILURE and record it in the + * DB. + * + * This operation could potentially fail because the source of the malformed problems could be a + * problematic DB; indeed, initially only log messagges where recorded. + * + * Yet it soon became clear that the source of malformed data were the clients and/or FE recording + * info in the DB. In these circumstances the client would see its request as being in the + * SRM_IN_PROGRESS state for ever. Hence the pressing need to inform it of the encountered + * problems. 
+ */ + public synchronized void fail(PtGChunkDataTO auxTO) { + + Connection con = null; + PreparedStatement update = null; + + try { + + con = getConnection(); + update = con.prepareStatement(UPDATE_STATUS_GET_WHERE_REQUEST_GET_ID_IS); + update.setInt(1, statusCodeConverter.toDB(SRM_FAILURE)); + update.setString(2, "Request is malformed!"); + update.setLong(3, auxTO.primaryKey()); + log.trace("PTG CHUNK DAO: signalMalformed; {}", update); + update.executeUpdate(); + + } catch (SQLException e) { + log.error("PtGChunkDAO! Unable to signal in DB that the request was " + + "malformed! Request: {}; Exception: {}", auxTO.toString(), e.toString()); + e.printStackTrace(); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + /** + * Method that returns the number of Get requests on the given SURL, that are in SRM_FILE_PINNED + * state. + * + * This method is intended to be used by PtGChunkCatalog in the isSRM_FILE_PINNED method + * invocation. + * + * In case of any error, 0 is returned. + */ + // request_Get table + public synchronized int numberInSRM_FILE_PINNED(int surlUniqueID) { + + return count(surlUniqueID, SRM_FILE_PINNED); + } + + public synchronized int count(int surlUniqueID, TStatusCode status) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + int count = 0; + + try { + con = getConnection(); + find = con.prepareStatement(COUNT_REQUEST_ON_SURL_WITH_STATUS); + find.setInt(1, surlUniqueID); + find.setInt(2, statusCodeConverter.toDB(status)); + log.trace("PtG CHUNK DAO - numberInSRM_FILE_PINNED method: {}", find); + rs = find.executeQuery(); + + if (rs.next()) { + count = rs.getInt(1); + } + } catch (SQLException e) { + log.error("PtG CHUNK DAO! Unable to determine numberInSRM_FILE_PINNED! " + "Returning 0! 
{}", + e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return count; + } + + /** + * Method that updates all expired requests in SRM_FILE_PINNED state, into SRM_RELEASED. + * + * This is needed when the client forgets to invoke srmReleaseFiles(). + * + * @return + */ + public synchronized Collection transitExpiredSRM_FILE_PINNED() { + + Map expiredSurlMap = Maps.newHashMap(); + Set pinnedSurlSet = Sets.newHashSet(); + + Connection con = null; + PreparedStatement findExpired = null; + PreparedStatement updateExpired = null; + PreparedStatement findPtgPinnedSurls = null; + PreparedStatement findBolPinnedSurls = null; + ResultSet expired = null; + ResultSet ptgPinnedSurls = null; + ResultSet bolPinnedSurls = null; + + /* Find all expired SURLs */ + try { + // start transaction + con = getManagedConnection(); + + findExpired = con.prepareStatement(SELECT_EXPIRED_REQUESTS); + findExpired.setInt(1, statusCodeConverter.toDB(SRM_FILE_PINNED)); + + expired = findExpired.executeQuery(); + + while (expired.next()) { + String sourceSURL = expired.getString("rg.sourceSURL"); + Integer uniqueID = Integer.valueOf(expired.getInt("rg.sourceSURL_uniqueID")); + /* If the uniqueID is not set compute it */ + if (expired.wasNull()) { + try { + TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); + uniqueID = tsurl.uniqueId(); + } catch (InvalidTSURLAttributesException e) { + log.warn("PtGChunkDAO! unable to build the TSURL from {}: " + + "InvalidTSURLAttributesException {}", sourceSURL, e.getMessage(), e); + } + } + expiredSurlMap.put(sourceSURL, uniqueID); + } + + if (expiredSurlMap.isEmpty()) { + con.commit(); + log.trace( + "PtGChunkDAO! 
No chunk of PtG request was transited from SRM_FILE_PINNED to SRM_RELEASED."); + return Lists.newArrayList(); + } + + updateExpired = con.prepareStatement(UPDATE_STATUS_OF_EXPIRED_REQUESTS); + updateExpired.setInt(1, statusCodeConverter.toDB(SRM_RELEASED)); + updateExpired.setInt(2, statusCodeConverter.toDB(SRM_FILE_PINNED)); + log.trace("PtG CHUNK DAO - transitExpiredSRM_FILE_PINNED method: {}", updateExpired); + int count = updateExpired.executeUpdate(); + + if (count == 0) { + log.trace("PtGChunkDAO! No chunk of PtG request was " + + "transited from SRM_FILE_PINNED to SRM_RELEASED."); + } else { + log.info("PtGChunkDAO! {} chunks of PtG requests were transited from" + + " SRM_FILE_PINNED to SRM_RELEASED.", count); + } + + /* + * in order to enhance performance here we can check if there is any file system with tape + * (T1D0, T1D1), if there is not any we can skip the following + */ + + /* Find all not expired SURLs from PtG and BoL */ + + findPtgPinnedSurls = con.prepareStatement(SELECT_PTG_PINNED_SURLS); + findPtgPinnedSurls.setInt(1, statusCodeConverter.toDB(SRM_FILE_PINNED)); + + ptgPinnedSurls = findPtgPinnedSurls.executeQuery(); + + while (ptgPinnedSurls.next()) { + String sourceSURL = ptgPinnedSurls.getString("rg.sourceSURL"); + Integer uniqueID = Integer.valueOf(ptgPinnedSurls.getInt("rg.sourceSURL_uniqueID")); + /* If the uniqueID is not setted compute it */ + if (ptgPinnedSurls.wasNull()) { + try { + TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); + uniqueID = tsurl.uniqueId(); + } catch (InvalidTSURLAttributesException e) { + log.warn("PtGChunkDAO! unable to build the TSURL from {}. 
" + + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage()); + } + } + pinnedSurlSet.add(uniqueID); + } + + // SURLs pinned by BoLs + findBolPinnedSurls = con.prepareStatement(SELECT_BOL_PINNED_SURLS); + findBolPinnedSurls.setInt(1, statusCodeConverter.toDB(SRM_SUCCESS)); + bolPinnedSurls = findBolPinnedSurls.executeQuery(); + + while (bolPinnedSurls.next()) { + String sourceSURL = bolPinnedSurls.getString("rb.sourceSURL"); + Integer uniqueID = Integer.valueOf(bolPinnedSurls.getInt("rb.sourceSURL_uniqueID")); + /* If the uniqueID is not setted compute it */ + if (bolPinnedSurls.wasNull()) { + try { + TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); + uniqueID = tsurl.uniqueId(); + } catch (InvalidTSURLAttributesException e) { + log.warn("PtGChunkDAO! unable to build the TSURL from {}. " + + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage(), e); + } + } + pinnedSurlSet.add(uniqueID); + } + + con.commit(); + } catch (SQLException e) { + log.error("PtGChunkDAO! SQLException. 
{}", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeStatement(findExpired); + closeStatement(updateExpired); + closeStatement(findPtgPinnedSurls); + closeStatement(findBolPinnedSurls); + closeResultSet(expired); + closeResultSet(ptgPinnedSurls); + closeResultSet(bolPinnedSurls); + closeConnection(con); + } + + Collection expiredSurlList = Lists.newArrayList(); + /* Remove the Extended Attribute pinned if there is not a valid SURL on it */ + TSURL surl; + for (Entry surlEntry : expiredSurlMap.entrySet()) { + if (!pinnedSurlSet.contains(surlEntry.getValue())) { + try { + surl = TSURL.makeFromStringValidate(surlEntry.getKey()); + } catch (InvalidTSURLAttributesException e) { + log.error("Invalid SURL, cannot release the pin " + "(Extended Attribute): {}", + surlEntry.getKey()); + continue; + } + expiredSurlList.add(surl); + StoRI stori; + try { + stori = Namespace.getInstance().resolveStoRIbySURL(surl); + } catch (Throwable e) { + log.error("Invalid SURL {} cannot release the pin. {}: {}", surlEntry.getKey(), + e.getClass().getCanonicalName(), e.getMessage(), e); + continue; + } + + if (stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { + StormEA.removePinned(stori.getAbsolutePath()); + } + } + } + return expiredSurlList; + } + + /** + * Method that updates all chunks in SRM_FILE_PINNED state, into SRM_RELEASED. An array of long + * representing the primary key of each chunk is required: only they get the status changed + * provided their current status is SRM_FILE_PINNED. + * + * This method is used during srmReleaseFiles + * + * In case of any error nothing happens and no exception is thrown, but proper messagges get + * logged. + */ + public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids) { + + String str = "UPDATE status_Get sg SET sg.statusCode=? " + + "WHERE sg.statusCode=? 
AND sg.request_GetID IN " + makeWhereString(ids); + + Connection con = null; + PreparedStatement stmt = null; + try { + + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(SRM_RELEASED)); + stmt.setInt(2, statusCodeConverter.toDB(SRM_FILE_PINNED)); + log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", stmt); + int count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PtG CHUNK DAO! No chunk of PtG request was " + + "transited from SRM_FILE_PINNED to SRM_RELEASED."); + } else { + log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited " + + "from SRM_FILE_PINNED to SRM_RELEASED.", count); + } + } catch (SQLException e) { + log.error( + "PtG CHUNK DAO! Unable to transit chunks" + " from SRM_FILE_PINNED to SRM_RELEASED! {}", + e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * @param ids + * @param token + */ + public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids, TRequestToken token) { + + if (token == null) { + transitSRM_FILE_PINNEDtoSRM_RELEASED(ids); + return; + } + + /* + * If a request token has been specified, only the related Get requests have to be released. + * This is done adding the r.r_token="..." clause in the where subquery. + */ + String str = "UPDATE " + + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "SET sg.statusCode=? " + "WHERE sg.statusCode=? 
AND rq.r_token='" + token.getValue() + + "' AND rg.ID IN " + makeWhereString(ids); + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(SRM_RELEASED)); + stmt.setInt(2, statusCodeConverter.toDB(SRM_FILE_PINNED)); + log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", stmt); + int count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PtG CHUNK DAO! No chunk of PtG request was" + + " transited from SRM_FILE_PINNED to SRM_RELEASED."); + } else { + log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited from " + + "SRM_FILE_PINNED to SRM_RELEASED.", count); + } + } catch (SQLException e) { + log.error( + "PtG CHUNK DAO! Unable to transit chunks from " + "SRM_FILE_PINNED to SRM_RELEASED! {}", + e.getMessage(), e); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + public synchronized void updateStatus(TRequestToken requestToken, int[] surlUniqueIDs, + String[] surls, TStatusCode statusCode, String explanation) { + + String str = "UPDATE " + + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "SET sg.statusCode=? , sg.explanation=? " + "WHERE rq.r_token='" + requestToken.toString() + + "' AND ( rg.sourceSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlUniqueIDs) + + " AND rg.sourceSURL IN " + makeSurlString(surls) + " ) "; + + Connection con = null; + PreparedStatement stmt = null; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(statusCode)); + stmt.setString(2, (explanation != null ? explanation : "")); + log.trace("PtG CHUNK DAO - updateStatus: {}", stmt); + int count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PtG CHUNK DAO! No chunk of PtG request was updated to {}.", statusCode); + } else { + log.info("PtG CHUNK DAO! 
{} chunks of PtG requests were updated to {}.", count, statusCode); + } + } catch (SQLException e) { + log.error("PtG CHUNK DAO! Unable to updated to {}! {}", statusCode, e.getMessage(), e); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + public synchronized void updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || explanation == null) { + throw new IllegalArgumentException("Unable to perform the updateStatusOnMatchingStatus, " + + "invalid arguments: requestToken=" + requestToken + " explanation=" + explanation); + } + doUpdateStatusOnMatchingStatus(requestToken, null, null, expectedStatusCode, newStatusCode, + explanation, true, false, true); + } + + private synchronized void doUpdateStatusOnMatchingStatus(TRequestToken requestToken, + int[] surlUniqueIDs, String[] surls, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation, boolean withRequestToken, boolean withSurls, + boolean withExplanation) throws IllegalArgumentException { + + if ((withRequestToken && requestToken == null) || (withExplanation && explanation == null) + || (withSurls && (surlUniqueIDs == null || surls == null))) { + + throw new IllegalArgumentException("Unable to perform the doUpdateStatusOnMatchingStatus, " + + "invalid arguments: withRequestToken=" + withRequestToken + " requestToken=" + + requestToken + " withSurls=" + withSurls + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + + surls + " withExplaination=" + withExplanation + " explanation=" + explanation); + } + + String str = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + "SET sg.statusCode=? "; + if (withExplanation) { + str += " , " + buildExpainationSet(explanation); + } + str += " WHERE sg.statusCode=? 
"; + if (withRequestToken) { + str += " AND " + buildTokenWhereClause(requestToken); + } + if (withSurls) { + str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); + } + + Connection con = null; + PreparedStatement stmt = null; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(newStatusCode)); + stmt.setInt(2, statusCodeConverter.toDB(expectedStatusCode)); + log.trace("PtG CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt); + int count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PtG CHUNK DAO! No chunk of PtG request was updated " + "from {} to {}.", + expectedStatusCode, newStatusCode); + } else { + log.debug("PtG CHUNK DAO! {} chunks of PtG requests were updated " + "from {} to {}.", + count, expectedStatusCode, newStatusCode); + } + } catch (SQLException e) { + log.error("PtG CHUNK DAO! Unable to updated from {} to {}! {}", expectedStatusCode, + newStatusCode, e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that returns a String containing all IDs. + */ + private String makeWhereString(long[] rowids) { + + StringBuilder sb = new StringBuilder("("); + int n = rowids.length; + for (int i = 0; i < n; i++) { + sb.append(rowids[i]); + if (i < (n - 1)) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all SURL's IDs. + */ + private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { + + StringBuilder sb = new StringBuilder("("); + for (int i = 0; i < surlUniqueIDs.length; i++) { + if (i > 0) { + sb.append(","); + } + sb.append(surlUniqueIDs[i]); + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all SURLs. 
+ */ + private String makeSurlString(String[] surls) { + + StringBuilder sb = new StringBuilder("("); + int n = surls.length; + + for (int i = 0; i < n; i++) { + + SURL requestedSURL; + + try { + requestedSURL = SURL.makeSURLfromString(surls[i]); + } catch (NamespaceException e) { + log.error(e.getMessage()); + log.debug("Skip '{}' during query creation", surls[i]); + continue; + } + + sb.append("'"); + sb.append(requestedSURL.getNormalFormAsString()); + sb.append("','"); + sb.append(requestedSURL.getQueryFormAsString()); + sb.append("'"); + + if (i < (n - 1)) { + sb.append(","); + } + } + + sb.append(")"); + return sb.toString(); + } + + private String buildExpainationSet(String explanation) { + + return " sg.explanation='" + explanation + "' "; + } + + private String buildTokenWhereClause(TRequestToken requestToken) { + + return " rq.r_token='" + requestToken.toString() + "' "; + } + + private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { + + return " ( rg.sourceSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rg.sourceSURL IN " + makeSurlString(surls) + " ) "; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/PtPChunkDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/PtPChunkDAOMySql.java new file mode 100644 index 000000000..48eaceb0f --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/PtPChunkDAOMySql.java @@ -0,0 +1,798 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.catalogs.ChunkDAOUtils.buildInClauseForArray; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_LIFETIME_EXPIRED; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import org.apache.commons.lang.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.naming.SURL; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.PtPChunkDAO; +import it.grid.storm.persistence.model.PtPChunkDataTO; +import it.grid.storm.persistence.model.ReducedPtPChunkDataTO; +import it.grid.storm.persistence.pool.StormDbConnectionPool; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TStatusCode; + +/** + * DAO class for PtPChunkCatalog. This DAO is specifically designed to connect to a MySQL DB. The + * raw data found in those tables is pre-treated in order to turn it into the Object Model of StoRM. + * See Method comments for further info. BEWARE! DAO Adjusts for extra fields in the DB that are not + * present in the object model. 
+ * + * @author EGRID ICTP + * @version 2.0 + * @date June 2005 + */ +public class PtPChunkDAOMySql extends AbstractDAO implements PtPChunkDAO { + + private static final Logger log = LoggerFactory.getLogger(PtPChunkDAOMySql.class); + + private static final String UPDATE_REQUEST_PUT_WHERE_ID_IS = "UPDATE " + + "request_queue rq JOIN (status_Put sp, request_Put rp) ON " + + "(rq.ID=rp.request_queueID AND sp.request_PutID=rp.ID) " + + "SET sp.transferURL=?, sp.statusCode=?, sp.explanation=?, rq.pinLifetime=?, rq.fileLifetime=?, " + + "rq.config_FileStorageTypeID=?, rq.config_OverwriteID=?, " + + "rp.normalized_targetSURL_StFN=?, rp.targetSURL_uniqueID=? " + "WHERE rp.ID=?"; + + private static final String UPDATE_REDUCED_REQUEST_PUT_WHERE_ID_IS = + "UPDATE request_Put SET normalized_targetSURL_StFN=?, targetSURL_uniqueID=? " + "WHERE ID=?"; + + private static final String SELECT_REQUEST_PROTOCOLS_WHERE_TOKEN_IS = + "SELECT tp.config_ProtocolsID " + + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " + + "WHERE rq.r_token=?"; + + private static final String SELECT_FULL_REQUEST_PUT_WHERE_TOKEN_AND_STATUS = + "SELECT rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode " + + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " + + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " + + "WHERE rq.r_token=? AND sp.statusCode<>?"; + + private static final String UPDATE_STATUS_PUT_WHERE_ID_IS = + "UPDATE status_Put sp SET sp.statusCode=?, sp.explanation=? 
WHERE sp.request_PutID=?"; + + private static final String SELECT_EXPIRED_REQUEST_PUT_WHERE_STATUS_IS = + "SELECT rp.ID, rp.targetSURL " + + "FROM status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " + + "WHERE sp.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; + + private static PtPChunkDAO instance; + + public static synchronized PtPChunkDAO getInstance() { + if (instance == null) { + instance = new PtPChunkDAOMySql(); + } + return instance; + } + + private StatusCodeConverter statusCodeConverter; + + private PtPChunkDAOMySql() { + + super(StormDbConnectionPool.getInstance()); + statusCodeConverter = StatusCodeConverter.getInstance(); + } + + /** + * Method used to save the changes made to a retrieved PtPChunkDataTO, back into the MySQL DB. + * Only the transferURL, statusCode and explanation, of status_Put table get written to the DB. + * Likewise for the pinLifetime and fileLifetime of request_queue. In case of any error, an error + * message gets logged but no exception is thrown. + */ + public synchronized void update(PtPChunkDataTO to) { + + Connection con = null; + PreparedStatement updatePut = null; + try { + con = getConnection(); + updatePut = con.prepareStatement(UPDATE_REQUEST_PUT_WHERE_ID_IS); + + updatePut.setString(1, to.transferURL()); + updatePut.setInt(2, to.status()); + updatePut.setString(3, to.errString()); + updatePut.setInt(4, to.pinLifetime()); + updatePut.setInt(5, to.fileLifetime()); + updatePut.setString(6, to.fileStorageType()); + updatePut.setString(7, to.overwriteOption()); + updatePut.setString(8, to.normalizedStFN()); + updatePut.setInt(9, to.surlUniqueID()); + updatePut.setLong(10, to.primaryKey()); + // run updateStatusPut... + log.trace("PtP CHUNK DAO - update method: {}", updatePut); + updatePut.executeUpdate(); + } catch (SQLException e) { + log.error("PtP CHUNK DAO: Unable to complete update! 
{}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(updatePut); + closeConnection(con); + } + } + + /** + * Updates the request_Put represented by the received ReducedPtPChunkDataTO by setting its + * normalized_targetSURL_StFN and targetSURL_uniqueID + * + * @param chunkTO + */ + public synchronized void updateIncomplete(ReducedPtPChunkDataTO chunkTO) { + + Connection con = null; + PreparedStatement stmt = null; + + try { + con = getConnection(); + stmt = con.prepareStatement(UPDATE_REDUCED_REQUEST_PUT_WHERE_ID_IS); + stmt.setString(1, chunkTO.normalizedStFN()); + stmt.setInt(2, chunkTO.surlUniqueID()); + stmt.setLong(3, chunkTO.primaryKey()); + log.trace("PtP CHUNK DAO - update incomplete: {}", stmt); + stmt.executeUpdate(); + } catch (SQLException e) { + log.error("PtP CHUNK DAO: Unable to complete update incomplete! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that queries the MySQL DB to find all entries matching the supplied TRequestToken. The + * Collection contains the corresponding PtPChunkDataTO objects. An initial simple query + * establishes the list of protocols associated with the request. A second complex query + * establishes all chunks associated with the request, by properly joining request_queue, + * request_Put and status_Put. The considered fields are: (1) From status_Put: the ID field which + * becomes the TOs primary key, and statusCode. (2) From request_Put: targetSURL and + * expectedFileSize. (3) From request_queue: pinLifetime, fileLifetime, config_FileStorageTypeID, + * s_token, config_OverwriteID. In case of any error, a log gets written and an empty collection + * is returned. No exception is returned. NOTE! Chunks in SRM_ABORTED status are NOT returned! 
+ * This is important because this method is intended to be used by the Feeders to fetch all chunks + * in the request, and aborted chunks should not be picked up for processing! + */ + public synchronized Collection find(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement findProtocols = null; + PreparedStatement findRequest = null; + ResultSet rsProtocols = null; + ResultSet rsRequest = null; + + Collection results = Lists.newArrayList(); + + try { + + con = getManagedConnection(); + findProtocols = con.prepareStatement(SELECT_REQUEST_PROTOCOLS_WHERE_TOKEN_IS); + + findProtocols.setString(1, requestToken.getValue()); + + log.trace("PtP CHUNK DAO - find method: {}", findProtocols); + rsProtocols = findProtocols.executeQuery(); + + List protocols = Lists.newArrayList(); + while (rsProtocols.next()) { + protocols.add(rsProtocols.getString("tp.config_ProtocolsID")); + } + + // get chunks of the request + findRequest = con.prepareStatement(SELECT_FULL_REQUEST_PUT_WHERE_TOKEN_AND_STATUS); + findRequest.setString(1, requestToken.getValue()); + findRequest.setInt(2, statusCodeConverter.toDB(SRM_ABORTED)); + log.trace("PtP CHUNK DAO - find method: {}", findRequest); + rsRequest = findRequest.executeQuery(); + + while (rsRequest.next()) { + PtPChunkDataTO chunkDataTO = new PtPChunkDataTO(); + chunkDataTO.setFileStorageType(rsRequest.getString("rq.config_FileStorageTypeID")); + chunkDataTO.setOverwriteOption(rsRequest.getString("rq.config_OverwriteID")); + chunkDataTO.setTimeStamp(rsRequest.getTimestamp("rq.timeStamp")); + chunkDataTO.setPinLifetime(rsRequest.getInt("rq.pinLifetime")); + chunkDataTO.setFileLifetime(rsRequest.getInt("rq.fileLifetime")); + chunkDataTO.setSpaceToken(rsRequest.getString("rq.s_token")); + chunkDataTO.setClientDN(rsRequest.getString("rq.client_dn")); + + /** + * This code is only for the 1.3.18. This is a workaround to get FQANs using the proxy field + * on request_queue. 
The FE use the proxy field of request_queue to insert a single FQAN + * string containing all FQAN separated by the "#" char. The proxy is a BLOB, hence it has + * to be properly converted in string. + */ + java.sql.Blob blob = rsRequest.getBlob("rq.proxy"); + if (!rsRequest.wasNull() && blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + chunkDataTO.setVomsAttributes(new String(bdata)); + } + chunkDataTO.setPrimaryKey(rsRequest.getLong("rp.ID")); + chunkDataTO.setToSURL(rsRequest.getString("rp.targetSURL")); + + chunkDataTO.setNormalizedStFN(rsRequest.getString("rp.normalized_targetSURL_StFN")); + int uniqueID = rsRequest.getInt("rp.targetSURL_uniqueID"); + if (!rsRequest.wasNull()) { + chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); + } + + chunkDataTO.setExpectedFileSize(rsRequest.getLong("rp.expectedFileSize")); + chunkDataTO.setProtocolList(protocols); + chunkDataTO.setRequestToken(requestToken.getValue()); + chunkDataTO.setStatus(rsRequest.getInt("sp.statusCode")); + results.add(chunkDataTO); + } + con.commit(); + } catch (SQLException e) { + log.error("PTP CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rsProtocols); + closeResultSet(rsRequest); + closeStatement(findProtocols); + closeStatement(findRequest); + closeConnection(con); + } + return results; + } + + /** + * Method used in extraordinary situations to signal that data retrieved from the DB was malformed + * and could not be translated into the StoRM object model. This method attempts to change the + * status of the chunk to SRM_FAILURE and record it in the DB, in the status_Put table. This + * operation could potentially fail because the source of the malformed problems could be a + * problematic DB; indeed, initially only log messages were recorded. 
Yet it soon became clear + * that the source of malformed data were actually the clients themselves and/or FE recording in + * the DB. In these circumstances the client would find its request as being in the + * SRM_IN_PROGRESS state for ever. Hence the pressing need to inform it of the encountered + * problems. + */ + public synchronized int fail(PtPChunkDataTO auxTO) { + + Connection con = null; + PreparedStatement signal = null; + int updated = 0; + + try { + con = getConnection(); + signal = con.prepareStatement(UPDATE_STATUS_PUT_WHERE_ID_IS); + signal.setInt(1, statusCodeConverter.toDB(SRM_FAILURE)); + signal.setString(2, "This chunk of the request is malformed!"); + signal.setLong(3, auxTO.primaryKey()); + log.trace("PtP CHUNK DAO - signalMalformedPtPChunk method: {}", signal); + updated = signal.executeUpdate(); + } catch (SQLException e) { + log.error( + "PtPChunkDAO! Unable to signal in DB that a chunk of " + + "the request was malformed! Request: {}; Error: {}", + auxTO.toString(), e.getMessage(), e); + e.printStackTrace(); + updated = 0; + } finally { + closeStatement(signal); + closeConnection(con); + } + return updated; + } + + /** + * Method that retrieves all expired requests in SRM_SPACE_AVAILABLE state. 
+ * + * @return a Map containing the ID of the request as key and the relative SURL as value + */ + public synchronized Map getExpiredSRM_SPACE_AVAILABLE() { + + return getExpired(SRM_SPACE_AVAILABLE); + } + + public synchronized Map getExpired(TStatusCode status) { + + Map expiredRequests = Maps.newHashMap(); + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + + try { + + con = getConnection(); + find = con.prepareStatement(SELECT_EXPIRED_REQUEST_PUT_WHERE_STATUS_IS); + find.setInt(1, statusCodeConverter.toDB(status)); + log.trace("PtP CHUNK DAO - getExpiredSRM_SPACE_AVAILABLE: {}", find); + rs = find.executeQuery(); + while (rs.next()) { + expiredRequests.put(rs.getLong("rp.ID"), rs.getString("rp.targetSURL")); + } + + } catch (SQLException e) { + + log.error("PtPChunkDAO! Unable to select expired " + + "SRM_SPACE_AVAILABLE chunks of PtP requests. {}", e.getMessage(), e); + e.printStackTrace(); + + } finally { + + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return expiredRequests; + } + + /** + * Method that updates chunks in SRM_SPACE_AVAILABLE state, into SRM_FILE_LIFETIME_EXPIRED. An + * array of Long representing the primary key of each chunk is required. This is needed when the + * client forgets to invoke srmPutDone(). In case of any error or exception, the returned int + * value will be zero or less than the input List size. + * + * @param ids the list of the request ids to update + * + * @return The number of the updated records into the db + */ + public synchronized int transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED( + Collection ids) { + + Preconditions.checkNotNull(ids, "Invalid list of id"); + + String querySQL = "UPDATE status_Put sp " + + "JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " + + "SET sp.statusCode=?, sp.explanation=? " + + "WHERE sp.statusCode=? 
AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; + + if (!ids.isEmpty()) { + querySQL += "AND rp.ID IN (" + StringUtils.join(ids.toArray(), ',') + ")"; + } + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(querySQL); + stmt.setInt(1, statusCodeConverter.toDB(SRM_FILE_LIFETIME_EXPIRED)); + stmt.setString(2, "Expired pinLifetime"); + stmt.setInt(3, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); + + log.trace("PtP CHUNK DAO - transit SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED: {}", + stmt); + + count = stmt.executeUpdate(); + + } catch (SQLException e) { + log.error("PtPChunkDAO! Unable to transit chunks from " + + "SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + log.trace("PtPChunkDAO! {} chunks of PtP requests were transited " + + "from SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED.", count); + return count; + } + + public synchronized int transitLongTimeInProgressRequestsToStatus(long expirationTime, TStatusCode status, String explanation) { + + String sql = "UPDATE request_queue rq, request_Put rp, status_Put sp " + + "SET rq.status=?, sp.statusCode=?, sp.explanation=? " + + "WHERE rq.ID = rp.request_queueID and rp.ID = sp.request_PutID " + + "AND rq.status=? AND rq.timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? 
SECOND)"; + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setInt(1, statusCodeConverter.toDB(status)); + stmt.setInt(2, statusCodeConverter.toDB(status)); + stmt.setString(3, explanation); + stmt.setInt(4, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + stmt.setLong(5, expirationTime); + log.trace("PtP CHUNK DAO - transit SRM_REQUEST_INPROGRESS to {}: {}", status, stmt); + count = stmt.executeUpdate(); + + } catch (SQLException e) { + log.error("PtPChunkDAO! Unable to transit chunks from " + + "SRM_REQUEST_INPROGRESS to SRM_FAILURE! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + return count; + } + + public synchronized int updateStatus(Collection ids, TStatusCode fromStatus, + TStatusCode toStatus, String explanation) { + + Preconditions.checkNotNull(ids, "Invalid list of id"); + + if (ids.isEmpty()) { + return 0; + } + + String querySQL = "UPDATE request_queue rq, request_Put rp, status_Put sp " + + "SET rq.status=?, sp.statusCode=?, sp.explanation=? " + + "WHERE rq.ID = rp.request_queueID and rp.ID = sp.request_PutID " + + "AND rq.status=? AND rq.ID IN (" + buildInClauseForArray(ids.size()) + ")"; + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(querySQL); + stmt.setInt(1, statusCodeConverter.toDB(toStatus)); + stmt.setInt(2, statusCodeConverter.toDB(toStatus)); + stmt.setString(3, explanation); + stmt.setInt(4, statusCodeConverter.toDB(fromStatus)); + int i = 5; + for (Long id : ids) { + stmt.setLong(i, id); + i++; + } + log.trace("PtP CHUNK DAO - transit SRM_REQUEST_INPROGRESS to SRM_FAILURE: {}", stmt); + count = stmt.executeUpdate(); + + } catch (SQLException e) { + log.error("PtPChunkDAO! Unable to transit chunks from " + + "SRM_REQUEST_INPROGRESS to SRM_FAILURE! 
{}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + log.trace("PtPChunkDAO! {} chunks of PtP requests were transited " + + "from SRM_REQUEST_INPROGRESS to SRM_FAILURE.", count); + return count; + } + + public synchronized int updateStatus(TRequestToken requestToken, int[] surlsUniqueIDs, + String[] surls, TStatusCode statusCode, String explanation) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || explanation == null) { + throw new IllegalArgumentException("Unable to perform the updateStatus, " + + "invalid arguments: requestToken=" + requestToken + " explanation=" + explanation); + } + return doUpdateStatus(requestToken, surlsUniqueIDs, surls, statusCode, explanation, true, true); + } + + private int doUpdateStatus(TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, + TStatusCode statusCode, String explanation, boolean withRequestToken, + boolean withExplaination) throws IllegalArgumentException { + + if ((withRequestToken && requestToken == null) || (withExplaination && explanation == null)) { + throw new IllegalArgumentException( + "Unable to perform the updateStatus, " + "invalid arguments: withRequestToken=" + + withRequestToken + " requestToken=" + requestToken + " withExplaination=" + + withExplaination + " explaination=" + explanation); + } + + String str = + "UPDATE status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND " + + "rp.request_queueID=rq.ID " + "SET sp.statusCode=? 
"; + if (withExplaination) { + str += " , " + buildExpainationSet(explanation); + } + str += " WHERE "; + if (withRequestToken) { + str += buildTokenWhereClause(requestToken) + " AND "; + } + str += " ( rp.targetSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rp.targetSURL IN " + makeSurlString(surls) + " ) "; + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(statusCode)); + + log.trace("PTP CHUNK DAO - updateStatus: {}", stmt); + count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PTP CHUNK DAO! No chunk of PTP request was updated to {}.", statusCode); + } else { + log.info("PTP CHUNK DAO! {} chunks of PTP requests were updated " + "to {}.", count, + statusCode); + } + } catch (SQLException e) { + log.error("PTP CHUNK DAO! Unable to updated from to {}! {}", statusCode, e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + return count; + } + + public synchronized int updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || explanation == null) { + throw new IllegalArgumentException("Unable to perform the updateStatusOnMatchingStatus, " + + "invalid arguments: requestToken=" + requestToken + " explanation=" + explanation); + } + return doUpdateStatusOnMatchingStatus(requestToken, null, null, expectedStatusCode, + newStatusCode, explanation, true, false, true); + } + + public synchronized int updateStatusOnMatchingStatus(TRequestToken requestToken, + int[] surlsUniqueIDs, String[] surls, TStatusCode expectedStatusCode, + TStatusCode newStatusCode) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || surlsUniqueIDs == null + || surls == null || 
surlsUniqueIDs.length == 0 || surls.length == 0 + || surlsUniqueIDs.length != surls.length) { + throw new IllegalArgumentException("Unable to perform the updateStatusOnMatchingStatus, " + + "invalid arguments: requestToken=" + requestToken + "surlsUniqueIDs=" + surlsUniqueIDs + + " surls=" + surls); + } + return doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, expectedStatusCode, + newStatusCode, null, true, true, false); + } + + private int doUpdateStatusOnMatchingStatus(TRequestToken requestToken, int[] surlsUniqueIDs, + String[] surls, TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation, + boolean withRequestToken, boolean withSurls, boolean withExplanation) { + + if ((withRequestToken && requestToken == null) || (withExplanation && explanation == null) + || (withSurls && (surlsUniqueIDs == null || surls == null))) { + throw new IllegalArgumentException("Unable to perform the doUpdateStatusOnMatchingStatus, " + + "invalid arguments: withRequestToken=" + withRequestToken + " requestToken=" + + requestToken + " withSurls=" + withSurls + " surlsUniqueIDs=" + surlsUniqueIDs + + " surls=" + surls + " withExplaination=" + withExplanation + " explanation=" + + explanation); + } + + String str = "UPDATE " + + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " + + "SET sp.statusCode=? "; + if (withExplanation) { + str += " , " + buildExpainationSet(explanation); + } + str += " WHERE sp.statusCode=? 
"; + if (withRequestToken) { + str += " AND " + buildTokenWhereClause(requestToken); + } + if (withSurls) { + str += " AND " + buildSurlsWhereClause(surlsUniqueIDs, surls); + } + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(newStatusCode)); + stmt.setInt(2, statusCodeConverter.toDB(expectedStatusCode)); + log.trace("PTP CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt); + count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PTP CHUNK DAO! No chunk of PTP request was updated " + "from {} to {}.", + expectedStatusCode, newStatusCode); + } else { + log.debug("PTP CHUNK DAO! {} chunks of PTP requests were updated " + "from {} to {}.", + count, expectedStatusCode, newStatusCode); + } + } catch (SQLException e) { + log.error("PTP CHUNK DAO! Unable to updated from {} to {}! Error: {}", expectedStatusCode, + newStatusCode, e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + return count; + } + + public Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn) { + + if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 || surlsArray == null + || surlsArray.length == 0 || dn == null) { + throw new IllegalArgumentException( + "Unable to perform the find, " + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + + " surlsArray=" + surlsArray + " dn=" + dn); + } + return find(surlsUniqueIDs, surlsArray, dn, true); + } + + private synchronized Collection find(int[] surlsUniqueIDs, String[] surlsArray, + String dn, boolean withDn) throws IllegalArgumentException { + + if ((withDn && dn == null) || surlsUniqueIDs == null || surlsUniqueIDs.length == 0 + || surlsArray == null || surlsArray.length == 0) { + throw new IllegalArgumentException( + "Unable to perform the find, " + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + + " surlsArray=" + 
surlsArray + " withDn=" + withDn + " dn=" + dn); + } + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + + try { + // get chunks of the request + String str = + "SELECT rq.ID, rq.r_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, " + + "rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, " + + "sp.statusCode " + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " + + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " + + "WHERE ( rp.targetSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rp.targetSURL IN " + makeSurlString(surlsArray) + " )"; + + if (withDn) { + str += " AND rq.client_dn=\'" + dn + "\'"; + } + + con = getConnection(); + find = con.prepareStatement(str); + + List list = Lists.newArrayList(); + + log.trace("PtP CHUNK DAO - find method: {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + + PtPChunkDataTO chunkDataTO = new PtPChunkDataTO(); + chunkDataTO.setFileStorageType(rs.getString("rq.config_FileStorageTypeID")); + chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); + chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); + chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); + chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); + chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); + chunkDataTO.setClientDN(rs.getString("rq.client_dn")); + + /** + * This code is only for the 1.3.18. This is a workaround to get FQANs using the proxy field + * on request_queue. The FE use the proxy field of request_queue to insert a single FQAN + * string containing all FQAN separated by the "#" char. The proxy is a BLOB, hence it has + * to be properly converted in string. 
+ */ + java.sql.Blob blob = rs.getBlob("rq.proxy"); + if (!rs.wasNull() && blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + chunkDataTO.setVomsAttributes(new String(bdata)); + } + chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); + chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); + + chunkDataTO.setNormalizedStFN(rs.getString("rp.normalized_targetSURL_StFN")); + int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); + } + + chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); + chunkDataTO.setRequestToken(rs.getString("rq.r_token")); + chunkDataTO.setStatus(rs.getInt("sp.statusCode")); + list.add(chunkDataTO); + } + return list; + } catch (SQLException e) { + log.error("PTP CHUNK DAO: {}", e.getMessage(), e); + /* return empty Collection! */ + return Lists.newArrayList(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + } + + private String buildExpainationSet(String explanation) { + + return " sp.explanation='" + explanation + "' "; + } + + private String buildTokenWhereClause(TRequestToken requestToken) { + + return " rq.r_token='" + requestToken.toString() + "' "; + } + + private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { + + return " ( rp.targetSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rp.targetSURL IN " + makeSurlString(surls) + " ) "; + } + + /** + * Method that returns a String containing all Surl's IDs. + */ + private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { + + StringBuilder sb = new StringBuilder("("); + for (int i = 0; i < surlUniqueIDs.length; i++) { + if (i > 0) { + sb.append(","); + } + sb.append(surlUniqueIDs[i]); + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all Surls. 
+ */ + private String makeSurlString(String[] surls) { + + StringBuilder sb = new StringBuilder("("); + int n = surls.length; + + for (int i = 0; i < n; i++) { + + SURL requestedSURL; + + try { + requestedSURL = SURL.makeSURLfromString(surls[i]); + } catch (NamespaceException e) { + log.error(e.getMessage(), e); + log.debug("Skip '{}' during query creation", surls[i]); + continue; + } + + sb.append("'"); + sb.append(requestedSURL.getNormalFormAsString()); + sb.append("','"); + sb.append(requestedSURL.getQueryFormAsString()); + sb.append("'"); + + if (i < (n - 1)) { + sb.append(","); + } + } + + sb.append(")"); + return sb.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/RequestSummaryDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/RequestSummaryDAOMySql.java new file mode 100644 index 000000000..d762c7454 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/RequestSummaryDAOMySql.java @@ -0,0 +1,918 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.srm.types.TRequestType.EMPTY; +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_GET; +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_PUT; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_QUEUED; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Collection; +import java.util.Iterator; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + +import it.grid.storm.config.Configuration; +import it.grid.storm.persistence.converter.RequestTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.RequestSummaryDAO; +import it.grid.storm.persistence.model.RequestSummaryDataTO; +import it.grid.storm.persistence.pool.StormDbConnectionPool; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TRequestType; +import it.grid.storm.srm.types.TStatusCode; + +/** + * DAO class for RequestSummaryCatalog. This DAO is specifically designed to connect to a MySQL DB. + * + * @author EGRID ICTP + * @version 3.0 + * @date May 2005 + */ +public class RequestSummaryDAOMySql extends AbstractDAO implements RequestSummaryDAO { + + private static final Logger log = LoggerFactory.getLogger(RequestSummaryDAOMySql.class); + + private static final String SELECT_REQUEST_WHERE_STATUS_WITH_LIMIT = + "SELECT ID, config_RequestTypeID, r_token, timeStamp, client_dn, proxy " + + "FROM request_queue WHERE status=? 
LIMIT ?"; + + private static final String UPDATE_REQUEST_STATUS_WHERE_ID_IS = + "UPDATE request_queue SET status=?, errstring=? WHERE ID=?"; + + private static final String UPDATE_REQUEST_STATUS_WHERE_TOKEN_IS = + "UPDATE request_queue SET status=?, errstring=? WHERE r_token=?"; + + private static final String UPDATE_REQUEST_STATUS_WHERE_TOKEN_AND_STATUS_ARE = + "UPDATE request_queue SET status=?, errstring=? WHERE r_token=? AND status=?"; + + private static final String UPDATE_REQUEST_STATUS_AND_PINLIFETIME_WHERE_TOKEN_IS = + "UPDATE request_queue " + + "SET status=?, errstring=?, pinLifetime=pinLifetime+(UNIX_TIMESTAMP()-UNIX_TIMESTAMP(timeStamp)) " + + "WHERE r_token=?"; + + private static final String SELECT_REQUEST_WHERE_TOKEN_IS = + "SELECT ID, config_RequestTypeID from request_queue WHERE r_token=?"; + + private static final String SELECT_FULL_REQUEST_WHERE_TOKEN_IS = + "SELECT * from request_queue WHERE r_token=?"; + + private static final String SELECT_REQUEST_WHERE_TOKEN_AND_STATUS = + "SELECT ID, config_RequestTypeID FROM request_queue WHERE r_token=? AND status=?"; + + private static final String UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS = "UPDATE status_Get s " + + "JOIN (request_queue r, request_Get t) ON (s.request_GetID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=?"; + + private static final String UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS = "UPDATE status_Put s " + + "JOIN (request_queue r, request_Put t) ON (s.request_PutID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=?"; + + private static final String UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS = "UPDATE status_BoL s " + + "JOIN (request_queue r, request_BoL t) ON (s.request_BoLID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? 
WHERE r.ID=?"; + + private static final String UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS_AND_SURL_IN = + "UPDATE status_Get s " + + "JOIN (request_queue r, request_Get t) ON (s.request_GetID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=? AND sourceSURL IN "; + + private static final String UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS_AND_SURL_IN = + "UPDATE status_Put s " + + "JOIN (request_queue r, request_Put t) ON (s.request_PutID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=? AND targetSURL IN "; + + private static final String UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS_AND_SURL_IN = + "UPDATE status_BoL s " + + "JOIN (request_queue r, request_BoL t) ON (s.request_BoLID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=? AND sourceSURL IN "; + + private static final String SELECT_PURGEABLE_REQUESTS_WITH_LIMIT = + "SELECT ID, r_token FROM request_queue " + + "WHERE UNIX_TIMESTAMP(NOW()) - UNIX_TIMESTAMP(timeStamp) > ? AND status <> ? AND status <> ? LIMIT ?"; + + private static final String COUNT_PURGEABLE_REQUESTS = "SELECT count(*) FROM request_queue " + + "WHERE UNIX_TIMESTAMP(NOW()) - UNIX_TIMESTAMP(timeStamp) > ? AND status <> ? AND status <> ? 
"; + + private static final String DELETE_ORPHANS_DIR_OPTION = + "DELETE request_DirOption FROM request_DirOption " + + " LEFT JOIN request_Get ON request_DirOption.ID = request_Get.request_DirOptionID" + + " LEFT JOIN request_BoL ON request_DirOption.ID = request_BoL.request_DirOptionID " + + " LEFT JOIN request_Copy ON request_DirOption.ID = request_Copy.request_DirOptionID" + + " WHERE request_Copy.request_DirOptionID IS NULL AND" + + " request_Get.request_DirOptionID IS NULL AND" + + " request_BoL.request_DirOptionID IS NULL;"; + + private static RequestSummaryDAO instance; + + private final StatusCodeConverter statusCodeConverter; + private final RequestTypeConverter requestTypeConverter; + private final int MAX_FETCHED_REQUESTS = Configuration.getInstance().getRequestsPickerAgentMaxFetchedSize(); + + public static synchronized RequestSummaryDAO getInstance() { + if (instance == null) { + instance = new RequestSummaryDAOMySql(); + } + return instance; + } + + private RequestSummaryDAOMySql() { + super(StormDbConnectionPool.getInstance()); + statusCodeConverter = StatusCodeConverter.getInstance(); + requestTypeConverter = RequestTypeConverter.getInstance(); + } + + /** + * Method that retrieves requests in the SRM_REQUEST_QUEUED status: retrieved requests are limited + * to the number specified by the Configuration method getPicker2MaxBatchSize. All retrieved + * requests get their global status transited to SRM_REQUEST_INPROGRESS. A Collection of + * RequestSummaryDataTO is returned: if none are found, an empty collection is returned. + */ + public synchronized Collection fetchNewRequests(int limit) { + + Connection con = null; + PreparedStatement fetch = null; + PreparedStatement update = null; + ResultSet fetched = null; + Collection results = Lists.newArrayList(); + int howMuch = limit > MAX_FETCHED_REQUESTS ? 
MAX_FETCHED_REQUESTS : limit; + + try { + con = getManagedConnection(); + + // get id, request type, request token and client_DN of newly added + // requests, which must be in SRM_REQUEST_QUEUED state + fetch = con.prepareStatement(SELECT_REQUEST_WHERE_STATUS_WITH_LIMIT); + fetch.setInt(1, statusCodeConverter.toDB(SRM_REQUEST_QUEUED)); + fetch.setInt(2, howMuch); + fetched = fetch.executeQuery(); + + Collection rowids = Lists.newArrayList(); + + while (fetched.next()) { + long id = fetched.getLong("ID"); + rowids.add(Long.valueOf(id)); + RequestSummaryDataTO aux = new RequestSummaryDataTO(); + aux.setPrimaryKey(id); + aux.setRequestType(fetched.getString("config_RequestTypeID")); + aux.setRequestToken(fetched.getString("r_token")); + aux.setClientDN(fetched.getString("client_dn")); + aux.setTimestamp(fetched.getTimestamp("timeStamp")); + + /** + * This code is only for the 1.3.18. This is a workaround to get FQANs using the proxy field + * on request_queue. The FE use the proxy field of request_queue to insert a single FQAN + * string containing all FQAN separated by the "#" char. The proxy is a BLOB, hence it has + * to be properly converted in string. + */ + java.sql.Blob blob = fetched.getBlob("proxy"); + if (blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + aux.setVomsAttributes(new String(bdata)); + } + + results.add(aux); + } + + // transit state from SRM_REQUEST_QUEUED to SRM_REQUEST_INPROGRESS + if (!results.isEmpty()) { + String updateQuery = + "UPDATE request_queue SET status=?, errstring=? 
WHERE ID IN " + makeWhereString(rowids); + update = con.prepareStatement(updateQuery); + update.setInt(1, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + update.setString(2, "Request handled!"); + log.trace("REQUEST SUMMARY DAO - findNew: executing {}", update); + update.executeUpdate(); + } + + // commit and finish transaction + con.commit(); + + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - findNew: Unable to complete picking. " + + "Error: {}. Rolling back!", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + + } finally { + closeResultSet(fetched); + closeStatement(fetch); + closeStatement(update); + closeConnection(con); + } + + return results; + } + + /** + * Method used to signal in the DB that a request failed: the status of the request identified by + * the primary key index is transited to SRM_FAILURE, with the supplied explanation String. The + * supplied index is the primary key of the global request. In case of any error, nothing gets + * done and no exception is thrown, but proper error messages get logged. + */ + public void failRequest(long requestId, String explanation) { + + Connection con = null; + PreparedStatement ps = null; + try { + con = getConnection(); + ps = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + ps.setInt(1, statusCodeConverter.toDB(SRM_FAILURE)); + ps.setString(2, explanation); + ps.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failRequest executing: {}", ps); + ps.executeUpdate(); + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO! Unable to transit request identified by " + + "ID {} to SRM_FAILURE! Error: {}", requestId, e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * Method used to signal in the DB that a PtGRequest failed. 
The global status transits to + * SRM_FAILURE, as well as that of each chunk associated to the request. The supplied explanation + * string is used both for the global status as well as for each individual chunk. The supplied + * index is the primary key of the global request. In case of any error, nothing gets done and no + * exception is thrown, but proper error messages get logged. + */ + public void failPtGRequest(long requestId, String explanation) { + + Connection con = null; + PreparedStatement updateReq = null; + PreparedStatement updateChunk = null; + + int failCode = statusCodeConverter.toDB(SRM_FAILURE); + try { + // start transaction + con = getManagedConnection(); + + // update global status + updateReq = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + updateReq.setInt(1, failCode); + updateReq.setString(2, explanation); + updateReq.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failPtGRequest executing: {}", updateReq); + updateReq.executeUpdate(); + + // update each chunk status + updateChunk = con.prepareStatement(UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS); + updateChunk.setInt(1, failCode); + updateChunk.setString(2, explanation); + updateChunk.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failPtGRequest executing: {}", updateChunk); + updateChunk.executeUpdate(); + + // commit and finish transaction + con.commit(); + } catch (SQLException e) { + log.error( + "REQUEST SUMMARY DAO! Unable to transit PtG request identified " + + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", + requestId, e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeStatement(updateReq); + closeStatement(updateChunk); + closeConnection(con); + } + } + + /** + * Method used to signal in the DB that a PtPRequest failed. The global status transits to + * SRM_FAILURE, as well as that of each chunk associated to the request. 
The supplied explanation + * string is used both for the global status as well as for each individual chunk. The supplied + * index is the primary key of the global request. In case of any error, nothing gets done and no + * exception is thrown, but proper error messagges get logged. + */ + public void failPtPRequest(long requestId, String explanation) { + + Connection con = null; + PreparedStatement updateReq = null; + PreparedStatement updateChunk = null; + int failCode = statusCodeConverter.toDB(SRM_FAILURE); + try { + // start transaction + con = getManagedConnection(); + + // update global status + updateReq = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + updateReq.setInt(1, failCode); + updateReq.setString(2, explanation); + updateReq.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failPtPRequest executing: {}", updateReq); + updateReq.executeUpdate(); + + // update each chunk status + updateChunk = con.prepareStatement(UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS); + updateChunk.setInt(1, failCode); + updateChunk.setString(2, explanation); + updateChunk.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failPtPRequest executing: {}", updateChunk); + updateChunk.executeUpdate(); + + // commit and finish transaction + con.commit(); + + } catch (SQLException e) { + log.error( + "REQUEST SUMMARY DAO! Unable to transit PtP request identified " + + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", + requestId, e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeStatement(updateReq); + closeStatement(updateChunk); + closeConnection(con); + } + } + + /** + * Method used to update the global status of the request identified by the RequestToken rt. It + * gets updated the supplied status, with the supplied explanation String. If the supplied request + * token does not exist, nothing happens. 
+ */ + public void updateGlobalStatus(TRequestToken requestToken, TStatusCode status, + String explanation) { + + Connection con = null; + PreparedStatement update = null; + try { + con = getConnection(); + update = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_TOKEN_IS); + update.setInt(1, statusCodeConverter.toDB(status)); + update.setString(2, explanation); + update.setString(3, requestToken.getValue()); + log.trace("REQUEST SUMMARY DAO - updateGlobalStatus: executing {}", update); + update.executeUpdate(); + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + public void updateGlobalStatusOnMatchingGlobalStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + Connection con = null; + PreparedStatement update = null; + try { + con = getConnection(); + update = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_TOKEN_AND_STATUS_ARE); + update.setInt(1, statusCodeConverter.toDB(newStatusCode)); + update.setString(2, explanation); + update.setString(3, requestToken.getValue()); + update.setInt(4, statusCodeConverter.toDB(expectedStatusCode)); + log.trace("REQUEST SUMMARY DAO - updateGlobalStatusOnMatchingGlobalStatus: executing {}", + update); + update.executeUpdate(); + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + /** + * Method used to update the global status of the request identified by the RequestToken rt. It + * gets updated the supplied status, with the supplied explanation String and pin and file + * lifetimes are updated in order to start the countdown from now. If the supplied request token + * does not exist, nothing happens. 
+ */ + public void updateGlobalStatusPinFileLifetime(TRequestToken requestToken, TStatusCode status, + String explanation) { + + Connection con = null; + PreparedStatement update = null; + + try { + con = getConnection(); + update = con.prepareStatement(UPDATE_REQUEST_STATUS_AND_PINLIFETIME_WHERE_TOKEN_IS); + update.setInt(1, statusCodeConverter.toDB(status)); + update.setString(2, explanation); + update.setString(3, requestToken.getValue()); + log.trace("REQUEST SUMMARY DAO - updateGlobalStatus: executing {}", update); + update.executeUpdate(); + + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + /** + * Method used to transit the status of a request that is in SRM_REQUEST_QUEUED state, to + * SRM_ABORTED. All files associated with the request will also get their status changed to + * SRM_ABORTED. If the supplied token is null, or not found, or not in the SRM_REQUEST_QUEUED + * state, then nothing happens. 
+ */ + public void abortRequest(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement update = null; + PreparedStatement query = null; + ResultSet rs = null; + + try { + con = getManagedConnection(); + + query = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN_AND_STATUS); + query.setString(1, requestToken.getValue()); + query.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_QUEUED)); + log.trace("REQUEST SUMMARY DAO - abortRequest - {}", query); + rs = query.executeQuery(); + + if (rs.next()) { + long id = rs.getLong("ID"); + String type = rs.getString("config_RequestTypeID"); + update = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + update.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + update.setString(2, "User aborted request!"); + update.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortRequest - {}", update); + update.executeUpdate(); + + // update single chunk file statuses + TRequestType rtyp = requestTypeConverter.toSTORM(type); + if (EMPTY.equals(rtyp)) { + log.error("REQUEST SUMMARY DAO - Unable to complete abortRequest: " + + "could not update file statuses because the request type could " + + "not be translated from the DB!"); + con.rollback(); + } else { + if (PREPARE_TO_GET.equals(rtyp)) { + update = con.prepareStatement(UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS); + } else if (PREPARE_TO_PUT.equals(rtyp)) { + update = con.prepareStatement(UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS); + } else { + update = con.prepareStatement(UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS); + } + update.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + update.setString(2, "User aborted request!"); + update.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortRequest - {}", update); + update.executeUpdate(); + con.commit(); + } + } else { + con.rollback(); + } + } catch (SQLException e) { + + log.error("REQUEST SUMMARY DAO - abortRequest: {}", e.getMessage(), e); + e.printStackTrace(); + + } finally { + closeResultSet(rs); + 
closeStatement(update); + closeStatement(query); + closeConnection(con); + } + } + + /** + * Method used to transit the status of a request that is in SRM_REQUEST_INPROGRESS state, to + * SRM_ABORTED. All files associated with the request will also get their status changed to + * SRM_ABORTED. If the supplied token is null, or not found, or not in the SRM_REQUEST_INPROGRESS + * state, then nothing happens. + */ + public void abortInProgressRequest(TRequestToken rt) { + + Connection con = null; + PreparedStatement updateReq = null; + PreparedStatement updateChunk = null; + PreparedStatement query = null; + ResultSet rs = null; + + try { + con = getManagedConnection(); + + query = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN_AND_STATUS); + query.setString(1, rt.getValue()); + query.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", query); + rs = query.executeQuery(); + + if (rs.next()) { + // token found... + // get ID + long id = rs.getLong("ID"); + String type = rs.getString("config_RequestTypeID"); + // update global request status + updateReq = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + updateReq.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + updateReq.setString(2, "User aborted request!"); + updateReq.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", updateReq); + updateReq.executeUpdate(); + + // update single chunk file statuses + TRequestType rtyp = requestTypeConverter.toSTORM(type); + if (EMPTY.equals(rtyp)) { + log.error("REQUEST SUMMARY DAO - Unable to complete abortRequest: " + + "could not update file statuses because the request type could " + + "not be translated from the DB!"); + con.rollback(); + } else { + if (PREPARE_TO_GET.equals(rtyp)) { + updateChunk = con.prepareStatement(UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS); + } else if (PREPARE_TO_PUT.equals(rtyp)) { + updateChunk = 
con.prepareStatement(UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS); + } else { + updateChunk = con.prepareStatement(UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS); + } + } + updateChunk.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + updateChunk.setString(2, "User aborted request!"); + updateChunk.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", updateChunk); + updateChunk.executeUpdate(); + } else { + con.rollback(); + } + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - abortInProgressRequest: {}", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rs); + closeStatement(query); + closeStatement(updateReq); + closeStatement(updateChunk); + closeConnection(con); + } + } + + /** + * Method used to transit the status of chunks of a request that is in SRM_REQUEST_INPROGRESS + * state, to SRM_ABORTED. If the supplied token is null, or not found, or not in the + * SRM_REQUEST_INPROGRESS state, then nothing happens. 
+ */ + public void abortChunksOfInProgressRequest(TRequestToken requestToken, Collection surls) { + + Connection con = null; + PreparedStatement update = null; + PreparedStatement query = null; + ResultSet rs = null; + + try { + con = getManagedConnection(); + query = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN_AND_STATUS); + query.setString(1, requestToken.getValue()); + query.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + log.trace("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest - {}", query); + rs = query.executeQuery(); + + if (rs.next()) { + long id = rs.getLong("ID"); + String type = rs.getString("config_RequestTypeID"); + // update single chunk file statuses + TRequestType rtyp = requestTypeConverter.toSTORM(type); + if (EMPTY.equals(rtyp)) { + log.error("REQUEST SUMMARY DAO - Unable to complete abortRequest: " + + "could not update file statuses because the request type could " + + "not be translated from the DB!"); + con.rollback(); + } else { + String updateQuery; + if (PREPARE_TO_GET.equals(rtyp)) { + updateQuery = UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS_AND_SURL_IN + makeInString(surls); + } else if (PREPARE_TO_PUT.equals(rtyp)) { + updateQuery = UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS_AND_SURL_IN + makeInString(surls); + } else { + updateQuery = UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS_AND_SURL_IN + makeInString(surls); + } + update = con.prepareStatement(updateQuery); + } + update.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + update.setString(2, "User aborted request!"); + update.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest - {}", update); + update.executeUpdate(); + con.commit(); + } + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest: {}", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rs); + closeStatement(query); + 
closeStatement(update); + closeConnection(con); + } + } + + /** + * Private method that returns a String of all SURLS in the collection of String. + */ + private String makeInString(Collection c) { + + StringBuilder sb = new StringBuilder("("); + for (Iterator i = c.iterator(); i.hasNext();) { + sb.append(i.next()); + if (i.hasNext()) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns the config_RequestTypeID field present in request_queue table, for the + * request with the specified request token rt. In case of any error, the empty String "" is + * returned. + */ + public TRequestType getRequestType(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement query = null; + ResultSet rs = null; + TRequestType result = EMPTY; + + try { + con = getConnection(); + query = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN_IS); + query.setString(1, requestToken.getValue()); + log.trace("REQUEST SUMMARY DAO - typeOf - {}", query); + rs = query.executeQuery(); + if (rs.next()) { + result = requestTypeConverter.toSTORM(rs.getString("config_RequestTypeID")); + } + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - typeOf - {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(query); + closeConnection(con); + } + return result; + } + + /** + * Method that returns the config_RequestTypeID field present in request_queue table, for the + * request with the specified request token rt. In case of any error, the empty String "" is + * returned. 
+ */ + public RequestSummaryDataTO find(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement query = null; + ResultSet rs = null; + RequestSummaryDataTO to = null; + + try { + con = getConnection(); + query = con.prepareStatement(SELECT_FULL_REQUEST_WHERE_TOKEN_IS); + query.setString(1, requestToken.getValue()); + rs = query.executeQuery(); + + if (rs.first()) { + to = new RequestSummaryDataTO(); + to.setPrimaryKey(rs.getLong("ID")); + to.setRequestType(rs.getString("config_RequestTypeID")); + to.setClientDN(rs.getString("client_dn")); + to.setUserToken(rs.getString("u_token")); + to.setRetrytime(rs.getInt("retrytime")); + to.setPinLifetime(rs.getInt("pinLifetime")); + to.setSpaceToken(rs.getString("s_token")); + to.setStatus(rs.getInt("status")); + to.setErrstring(rs.getString("errstring")); + to.setRequestToken(rs.getString("r_token")); + to.setRemainingTotalTime(rs.getInt("remainingTotalTime")); + to.setFileLifetime(rs.getInt("fileLifetime")); + to.setNbreqfiles(rs.getInt("nbreqfiles")); + to.setNumOfCompleted(rs.getInt("numOfCompleted")); + to.setNumOfWaiting(rs.getInt("numOfWaiting")); + to.setNumOfFailed(rs.getInt("numOfFailed")); + to.setTimestamp(rs.getTimestamp("timeStamp")); + + java.sql.Blob blob = rs.getBlob("proxy"); + if (blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + to.setVomsAttributes(new String(bdata)); + } + to.setDeferredStartTime(rs.getInt("deferredStartTime")); + to.setRemainingDeferredStartTime(rs.getInt("remainingDeferredStartTime")); + + if (rs.next()) { + log.warn("More than a row matches token {}", requestToken); + } + } + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - find - {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(query); + closeConnection(con); + } + return to; + } + + /** + * Method that purges expired requests: it only removes up to a fixed value of expired requests at + * a time. 
The value is configured and obtained from the configuration property getPurgeBatchSize. + * A List of Strings with the request tokens removed is returned. In order to completely remove + * all expired requests, simply keep invoking this method until an empty List is returned. This + * batch processing is needed because there could be millions of expired requests which are likely + * to result in out-of-memory problems. Notice that in case of errors only error messages get + * logged. An empty List is also returned. + */ + public Collection purgeExpiredRequests(long expiredRequestTime, int purgeSize) { + + Connection con = null; + PreparedStatement fetch = null; + PreparedStatement deleteReq = null; + PreparedStatement deleteOrphans = null; + ResultSet rs = null; + Collection requestTokens = Lists.newArrayList(); + + try { + // start transaction + con = getManagedConnection(); + + fetch = con.prepareStatement(SELECT_PURGEABLE_REQUESTS_WITH_LIMIT); + fetch.setLong(1, expiredRequestTime); + fetch.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_QUEUED)); + fetch.setInt(3, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + fetch.setInt(4, purgeSize); + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", fetch); + rs = fetch.executeQuery(); + + Collection ids = Lists.newArrayList(); + + while (rs.next()) { + requestTokens.add(rs.getString("r_token")); + ids.add(Long.valueOf(rs.getLong("ID"))); + } + + if (!ids.isEmpty()) { + // REMOVE BATCH OF EXPIRED REQUESTS! 
+ + String deleteQuery = "DELETE FROM request_queue WHERE ID in " + makeWhereString(ids); + deleteReq = con.prepareStatement(deleteQuery); + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", deleteReq); + + int deleted = deleteReq.executeUpdate(); + if (deleted > 0) { + log.info("REQUEST SUMMARY DAO - purgeExpiredRequests - Deleted {} expired requests.", + deleted); + } else { + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - No deleted expired requests."); + } + + deleteOrphans = con.prepareStatement(DELETE_ORPHANS_DIR_OPTION); + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", deleteOrphans); + deleted = deleteOrphans.executeUpdate(); + + if (deleted > 0) { + log.info("REQUEST SUMMARY DAO - purgeExpiredRequests - Deleted {} " + + "DirOption related to expired requests.", deleted); + } else { + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - No Deleted " + + "DirOption related to expired requests."); + } + } + // commit and finish transaction + con.commit(); + + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - purgeExpiredRequests - Rolling back because of error: {}", + e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rs); + closeStatement(fetch); + closeStatement(deleteReq); + closeStatement(deleteOrphans); + closeConnection(con); + } + return requestTokens; + } + + /** + * Retrieve the total number of expired requests. 
+ * + * @return + */ + public int getNumberExpired() { + + int rowCount = 0; + + Connection con = null; + PreparedStatement ps = null; + ResultSet rs = null; + + try { + // start transaction + con = getConnection(); + + ps = con.prepareStatement(COUNT_PURGEABLE_REQUESTS); + ps.setLong(1, Configuration.getInstance().getCompletedRequestsAgentPurgeAge()); + ps.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_QUEUED)); + ps.setInt(3, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + + log.trace("REQUEST SUMMARY DAO - Number of expired requests: {}", ps); + rs = ps.executeQuery(); + + // Get the number of rows from the result set + if (rs.next()) { + rowCount = rs.getInt(1); + } + log.debug("Nr of expired requests is: {}", rowCount); + + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - purgeExpiredRequests - Rolling back because of error: {}", + e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(ps); + closeConnection(con); + } + + return rowCount; + + } + + /** + * Private method that returns a String of all IDs retrieved by the last SELECT. 
+ */ + private String makeWhereString(Collection rowids) { + + StringBuilder sb = new StringBuilder("("); + for (Iterator i = rowids.iterator(); i.hasNext();) { + sb.append(i.next()); + if (i.hasNext()) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/catalogs/surl/SURLStatusDAO.java b/src/main/java/it/grid/storm/persistence/impl/mysql/SURLStatusDAOMySql.java similarity index 58% rename from src/main/java/it/grid/storm/catalogs/surl/SURLStatusDAO.java rename to src/main/java/it/grid/storm/persistence/impl/mysql/SURLStatusDAOMySql.java index e70e3b23b..a43bf57a0 100644 --- a/src/main/java/it/grid/storm/catalogs/surl/SURLStatusDAO.java +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/SURLStatusDAOMySql.java @@ -1,10 +1,29 @@ -package it.grid.storm.catalogs.surl; +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; import it.grid.storm.catalogs.PtPChunkCatalog; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.StatusCodeConverter; -import it.grid.storm.catalogs.StoRMDataSource; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.SURLStatusDAO; +import it.grid.storm.persistence.pool.StormDbConnectionPool; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.TRequestToken; import 
it.grid.storm.srm.types.TRequestType; @@ -14,41 +33,46 @@ import it.grid.storm.synchcall.surl.ExpiredTokenException; import it.grid.storm.synchcall.surl.UnknownTokenException; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +public class SURLStatusDAOMySql extends AbstractDAO implements SURLStatusDAO { -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + public static final Logger LOGGER = LoggerFactory.getLogger(SURLStatusDAOMySql.class); -public class SURLStatusDAO { + private static SURLStatusDAO instance; - public static final Logger LOGGER = LoggerFactory - .getLogger(SURLStatusDAO.class); + public static synchronized SURLStatusDAO getInstance() { + if (instance == null) { + instance = new SURLStatusDAOMySql(); + } + return instance; + } - public boolean abortActivePtGsForSURL(GridUserInterface user, TSURL surl, - String explanation) { + private final StatusCodeConverter converter; + private final RequestSummaryCatalog requestSummaryCatalog; + private final PtPChunkCatalog ptpChunkCatalog; + + private SURLStatusDAOMySql() { + super(StormDbConnectionPool.getInstance()); + converter = StatusCodeConverter.getInstance(); + requestSummaryCatalog = RequestSummaryCatalog.getInstance(); + ptpChunkCatalog = PtPChunkCatalog.getInstance(); + } + + public boolean abortActivePtGsForSURL(GridUserInterface user, TSURL surl, String explanation) { surlSanityChecks(surl); - PreparedStatement stat = null; Connection con = null; + PreparedStatement stat = null; + int updateCount = 0; try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=20, rq.status=20, sg.explanation=? " - + "WHERE rg.sourceSURL = ? 
and rg.sourceSURL_uniqueID = ? " - + "AND (sg.statusCode=22 OR sg.statusCode=17) "; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "SET sg.statusCode=20, rq.status=20, sg.explanation=? " + + "WHERE rg.sourceSURL = ? and rg.sourceSURL_uniqueID = ? " + + "AND (sg.statusCode=22 OR sg.statusCode=17) "; if (user != null) { query += "AND rq.client_dn = ?"; @@ -63,43 +87,39 @@ public boolean abortActivePtGsForSURL(GridUserInterface user, TSURL surl, stat.setString(4, user.getDn()); } - final int updateCount = stat.executeUpdate(); - LOGGER.debug("abortActivePtGsForSURL: surl={}, numOfAbortedRequests={}", - surl, updateCount); - - return (updateCount != 0); + updateCount = stat.executeUpdate(); + LOGGER.debug("abortActivePtGsForSURL: surl={}, numOfAbortedRequests={}", surl, updateCount); } catch (SQLException e) { - String msg = String.format("abortActivePtGsForSURL: SQL error: %s", - e.getMessage()); - LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + String msg = String.format("abortActivePtGsForSURL: SQL error: %s", e.getMessage()); + LOGGER.error(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } + return (updateCount != 0); } - public boolean abortActivePtPsForSURL(GridUserInterface user, TSURL surl, - String explanation) { + public boolean abortActivePtPsForSURL(GridUserInterface user, TSURL surl, String explanation) { surlSanityChecks(surl); - PreparedStatement stat = null; Connection con = null; + PreparedStatement stat = null; + int updateCount = 0; try { con = getConnection(); - String query = "UPDATE status_Put sp " - + "JOIN (request_Put rp, request_queue rq) " - + "ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=20, rq.status=20, sp.explanation=? " - + "WHERE rp.targetSURL = ? and rp.targetSURL_uniqueID = ? 
" - + "AND (sp.statusCode=24 OR sp.statusCode=17)"; + String query = "UPDATE status_Put sp JOIN (request_Put rp, request_queue rq) " + + "ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " + + "SET sp.statusCode=20, rq.status=20, sp.explanation=? " + + "WHERE rp.targetSURL = ? and rp.targetSURL_uniqueID = ? " + + "AND (sp.statusCode=24 OR sp.statusCode=17)"; if (user != null) { query += "AND rq.client_dn = ?"; @@ -114,36 +134,30 @@ public boolean abortActivePtPsForSURL(GridUserInterface user, TSURL surl, stat.setString(4, user.getDn()); } - final int updateCount = stat.executeUpdate(); - - LOGGER.debug("abortActivePtPsForSURL: surl={}, numOfAbortedRequests={}", - surl, updateCount); + updateCount = stat.executeUpdate(); - return (updateCount != 0); + LOGGER.debug("abortActivePtPsForSURL: surl={}, numOfAbortedRequests={}", surl, updateCount); } catch (SQLException e) { - String msg = String.format("abortActivePtPsForSURL: SQL error: %s", - e.getMessage()); - LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + String msg = String.format("abortActivePtPsForSURL: SQL error: %s", e.getMessage()); + LOGGER.error(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } - + return (updateCount != 0); } - private Map buildStatusMap(ResultSet rs) - throws SQLException { + private Map buildStatusMap(ResultSet rs) throws SQLException { if (rs == null) { throw new IllegalArgumentException("rs cannot be null"); } Map statusMap = new HashMap(); - StatusCodeConverter converter = StatusCodeConverter.getInstance(); while (rs.next()) { TSURL surl = surlFromString(rs.getString(1)); TStatusCode sc = converter.toSTORM(rs.getInt(2)); @@ -155,42 +169,8 @@ private Map buildStatusMap(ResultSet rs) } - private void closeConnection(Connection conn) { - - if (conn != null) { - try { - conn.close(); - } catch (SQLException e) { - LOGGER.error("Error closing connection: {}.", e.getMessage(), e); - } - } - } - - private void 
closeResultSet(ResultSet rs) { - - if (rs != null) { - - try { - rs.close(); - } catch (SQLException e) { - LOGGER.error("Error closing result set: {}", e.getMessage(), e); - } - } - } - - private void closeStatetement(Statement stat) { - - if (stat != null) { - try { - stat.close(); - } catch (SQLException e) { - LOGGER.error("Error closing statement: {}.", e.getMessage(), e); - } - } - } - - private Map filterSURLStatuses( - Map statuses, List surls) { + private Map filterSURLStatuses(Map statuses, + List surls) { if (surls == null) { return statuses; @@ -209,8 +189,8 @@ private Map filterSURLStatuses( // Add a failure state for the surls that were // requested but are not linked to the token for (TSURL s : surlsCopy) { - statuses.put(s, new TReturnStatus(TStatusCode.SRM_FAILURE, - "SURL not linked to passed request token.")); + statuses.put(s, + new TReturnStatus(TStatusCode.SRM_FAILURE, "SURL not linked to passed request token.")); } return statuses; @@ -223,47 +203,40 @@ private Map getBoLSURLStatuses(TRequestToken token) { ResultSet rs = null; PreparedStatement stat = null; Connection con = null; + Map result = null; try { con = getConnection(); String query = "SELECT rb.sourceSURL, sb.statusCode " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID = rq.ID AND sb.request_BoLID = rb.ID)" - + "WHERE ( rq.r_token = ? )"; + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID = rq.ID AND sb.request_BoLID = rb.ID)" + + "WHERE ( rq.r_token = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, token.getValue()); rs = stat.executeQuery(); - return buildStatusMap(rs); + result = buildStatusMap(rs); } catch (SQLException e) { - String msg = String.format("getBoLSURLStatuses: SQL error: %s", - e.getMessage()); - + String msg = String.format("getBoLSURLStatuses: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + result = Maps.newHashMap(); } finally { - closeStatetement(stat); + closeResultSet(rs); + closeStatement(stat); closeConnection(con); } + return result; } - private Connection getConnection() throws SQLException { - - if (StoRMDataSource.getInstance() == null) { - throw new IllegalStateException("SToRM Data source not initialized!"); - } - return StoRMDataSource.getInstance().getConnection(); - } - - public Map getPinnedSURLsForUser( - GridUserInterface user, List surls) { + public Map getPinnedSURLsForUser(GridUserInterface user, + List surls) { if (user == null) { throw new NullPointerException("getPinnedSURLsForUser: null user!"); @@ -272,23 +245,22 @@ public Map getPinnedSURLsForUser( ResultSet rs = null; PreparedStatement stat = null; Connection con = null; - - StatusCodeConverter converter = StatusCodeConverter.getInstance(); + Map result = Maps.newHashMap(); try { con = getConnection(); String query = "SELECT rg.sourceSURL, rg.sourceSURL_uniqueID, sg.statusCode " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE ( sg.statusCode = 22 and rq.client_dn = ? )"; + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE ( sg.statusCode = 22 and rq.client_dn = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, user.getDn()); rs = stat.executeQuery(); - Map statusMap = new HashMap(); + Map statusMap = Maps.newHashMap(); while (rs.next()) { @@ -298,22 +270,25 @@ public Map getPinnedSURLsForUser( } - return filterSURLStatuses(statusMap, surls); + result = filterSURLStatuses(statusMap, surls); } catch (SQLException e) { - String msg = String.format("getPinnedSURLsForUser: SQL error: %s", - e.getMessage()); + + String msg = String.format("getPinnedSURLsForUser: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } + + return result; } - public Map getPinnedSURLsForUser( - GridUserInterface user, TRequestToken token, List surls) { + public Map getPinnedSURLsForUser(GridUserInterface user, + TRequestToken token, List surls) { userSanityChecks(user); tokenSanityChecks(token); @@ -323,22 +298,22 @@ public Map getPinnedSURLsForUser( PreparedStatement stat = null; Connection con = null; - StatusCodeConverter converter = StatusCodeConverter.getInstance(); + Map result = Maps.newHashMap(); try { con = getConnection(); String query = "SELECT rg.sourceSURL, rg.sourceSURL_uniqueID, sg.statusCode " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE ( sg.statusCode = 22 and rq.client_dn = ? and rq.r_token = ? )"; + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE ( sg.statusCode = 22 and rq.client_dn = ? and rq.r_token = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, user.getDn()); stat.setString(2, token.getValue()); rs = stat.executeQuery(); - Map statusMap = new HashMap(); + Map statusMap = Maps.newHashMap(); while (rs.next()) { @@ -348,18 +323,20 @@ public Map getPinnedSURLsForUser( } - return filterSURLStatuses(statusMap, surls); + result = filterSURLStatuses(statusMap, surls); } catch (SQLException e) { - String msg = String.format("getPinnedSURLsForUser: SQL error: %s", - e.getMessage()); + + String msg = String.format("getPinnedSURLsForUser: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } + return result; } private Map getPtGSURLStatuses(TRequestToken token) { @@ -370,30 +347,34 @@ private Map getPtGSURLStatuses(TRequestToken token) { PreparedStatement stat = null; Connection con = null; + Map result = Maps.newHashMap(); + try { con = getConnection(); String query = "SELECT rg.sourceSURL, sg.statusCode " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID = rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE ( rq.r_token = ? )"; + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID = rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE ( rq.r_token = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, token.getValue()); rs = stat.executeQuery(); - return buildStatusMap(rs); + result = buildStatusMap(rs); } catch (SQLException e) { - String msg = String.format("getPtGSURLStatuses: SQL error: %s", - e.getMessage()); + + String msg = String.format("getPtGSURLStatuses: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } + return result; } private Map getPtPSURLStatuses(TRequestToken token) { @@ -403,39 +384,39 @@ private Map getPtPSURLStatuses(TRequestToken token) { ResultSet rs = null; PreparedStatement stat = null; Connection con = null; + Map result = Maps.newHashMap(); try { con = getConnection(); String query = "SELECT rp.targetSURL, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID = rq.ID AND sp.request_PutID = rp.ID)" - + "WHERE ( rq.r_token = ? )"; + + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " + + "ON (rp.request_queueID = rq.ID AND sp.request_PutID = rp.ID)" + + "WHERE ( rq.r_token = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, token.getValue()); rs = stat.executeQuery(); - return buildStatusMap(rs); + result = buildStatusMap(rs); } catch (SQLException e) { - String msg = String.format("getPtPSURLStatuses: SQL error: %s", - e.getMessage()); + String msg = String.format("getPtPSURLStatuses: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } - + return result; } public Map getSURLStatuses(TRequestToken token) { - TRequestType rt = RequestSummaryCatalog.getInstance().typeOf(token); + TRequestType rt = requestSummaryCatalog.typeOf(token); if (rt.isEmpty()) throw new UnknownTokenException(token.getValue()); @@ -444,41 +425,38 @@ public Map getSURLStatuses(TRequestToken token) { throw new ExpiredTokenException(token.getValue()); switch (rt) { - case PREPARE_TO_GET: - return getPtGSURLStatuses(token); + case PREPARE_TO_GET: + return getPtGSURLStatuses(token); - case PREPARE_TO_PUT: - return getPtPSURLStatuses(token); + case PREPARE_TO_PUT: + return getPtPSURLStatuses(token); - case BRING_ON_LINE: - return getBoLSURLStatuses(token); + case BRING_ON_LINE: + return getBoLSURLStatuses(token); - default: - String msg = String.format("Invalid request type for token %s: %s", - token, rt.toString()); - throw new IllegalArgumentException(msg); + default: + String msg = String.format("Invalid request type for token %s: %s", token, rt.toString()); + throw new IllegalArgumentException(msg); } } - public Map getSURLStatuses(TRequestToken token, - List surls) { + public Map getSURLStatuses(TRequestToken token, List surls) { - TRequestType rt = RequestSummaryCatalog.getInstance().typeOf(token); + TRequestType rt = requestSummaryCatalog.typeOf(token); switch (rt) { - case PREPARE_TO_GET: - return filterSURLStatuses(getPtGSURLStatuses(token), surls); + case PREPARE_TO_GET: + return 
filterSURLStatuses(getPtGSURLStatuses(token), surls); - case PREPARE_TO_PUT: - return filterSURLStatuses(getPtPSURLStatuses(token), surls); + case PREPARE_TO_PUT: + return filterSURLStatuses(getPtPSURLStatuses(token), surls); - case BRING_ON_LINE: - return filterSURLStatuses(getBoLSURLStatuses(token), surls); + case BRING_ON_LINE: + return filterSURLStatuses(getBoLSURLStatuses(token), surls); - default: - String msg = String.format("Invalid request type for token %s: %s", - token, rt.toString()); - throw new IllegalArgumentException(msg); + default: + String msg = String.format("Invalid request type for token %s: %s", token, rt.toString()); + throw new IllegalArgumentException(msg); } } @@ -487,9 +465,8 @@ public int markSURLsReadyForRead(TRequestToken token, List surls) { tokenSanityChecks(token); surlSanityChecks(surls); - // I am not reimplementing the whole catalog functions - return PtPChunkCatalog.getInstance().updateFromPreviousStatus(token, surls, - TStatusCode.SRM_SPACE_AVAILABLE, TStatusCode.SRM_SUCCESS); + // I am not re-implementing the whole catalog functions + return ptpChunkCatalog.updateFromPreviousStatus(token, surls, SRM_SPACE_AVAILABLE, SRM_SUCCESS); } @@ -534,25 +511,25 @@ public void releaseSURL(TSURL surl) { try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=21" - + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " - + "AND rg.sourceSURL = ? and rg.sourceSURL_uniqueID = ?"; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID SET sg.statusCode=21" + + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " + + "AND rg.sourceSURL = ? 
and rg.sourceSURL_uniqueID = ?"; stat = con.prepareStatement(query); stat.setString(1, surl.getSURLString()); stat.setInt(2, surl.uniqueId()); stat.executeUpdate(); + } catch (SQLException e) { + String msg = String.format("releaseSURL: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } } @@ -568,14 +545,11 @@ public void releaseSURLs(GridUserInterface user, List surls) { try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=21 " - + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " - + "AND rg.sourceSURL_uniqueID IN (" + quoteSURLUniqueIDs(surls) + ") " - + "AND rg.sourceSURL IN (" + quoteSURLList(surls) + ") " - + "AND rq.client_dn = ?"; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID SET sg.statusCode=21 " + + "WHERE (sg.statusCode=22 OR sg.statusCode=0) AND rg.sourceSURL_uniqueID IN (" + + quoteSURLUniqueIDs(surls) + ") AND rg.sourceSURL IN (" + quoteSURLList(surls) + + ") AND rq.client_dn = ?"; stat = con.prepareStatement(query); stat.setString(1, user.getDn()); @@ -584,15 +558,15 @@ public void releaseSURLs(GridUserInterface user, List surls) { LOGGER.debug("releaseSURLs: released {} surls", releasedSURLsCount); } catch (SQLException e) { + String msg = String.format("releaseSURLs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } - } public void releaseSURLs(List surls) { @@ -605,24 +579,23 @@ public void releaseSURLs(List surls) { try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON 
sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=21 " - + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " - + "AND rg.sourceSURL_uniqueID IN (" + quoteSURLUniqueIDs(surls) + ") " - + "AND rg.sourceSURL IN (" + quoteSURLList(surls) + ")"; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID SET sg.statusCode=21 " + + "WHERE (sg.statusCode=22 OR sg.statusCode=0) AND rg.sourceSURL_uniqueID IN (" + + quoteSURLUniqueIDs(surls) + ") AND rg.sourceSURL IN (" + quoteSURLList(surls) + + ")"; stat = con.prepareStatement(query); stat.executeUpdate(); } catch (SQLException e) { + String msg = String.format("releaseSURLs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } } @@ -638,26 +611,24 @@ public void releaseSURLs(TRequestToken token, List surls) { try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=21 " - + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " - + "AND rg.sourceSURL_uniqueID IN (" + quoteSURLUniqueIDs(surls) + ") " - + "AND rg.sourceSURL IN (" + quoteSURLList(surls) + ") " - + "AND rq.r_token = ?"; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID SET sg.statusCode=21 " + + "WHERE (sg.statusCode=22 OR sg.statusCode=0) AND rg.sourceSURL_uniqueID IN (" + + quoteSURLUniqueIDs(surls) + ") AND rg.sourceSURL IN (" + quoteSURLList(surls) + + ") AND rq.r_token = ?"; stat = con.prepareStatement(query); stat.setString(1, token.getValue()); stat.executeUpdate(); } catch (SQLException e) { + String msg = String.format("releaseSURLs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw 
new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } } @@ -669,8 +640,7 @@ private TSURL surlFromString(String s) { return TSURL.makeFromStringWellFormed(s); } catch (InvalidTSURLAttributesException e) { - throw new IllegalArgumentException("Error creating surl from string: " - + s, e); + throw new IllegalArgumentException("Error creating surl from string: " + s, e); } } @@ -681,6 +651,7 @@ public boolean surlHasOngoingPtGs(TSURL surl) { ResultSet rs = null; PreparedStatement stat = null; Connection con = null; + boolean result = false; try { con = getConnection(); @@ -688,27 +659,28 @@ public boolean surlHasOngoingPtGs(TSURL surl) { // We basically check whether there are active requests // that have the SURL in SRM_FILE_PINNED status String query = "SELECT rq.ID, rg.ID, sg.statusCode " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID = rq.ID AND sg.request_GetID = rg.ID) " - + "WHERE ( rg.sourceSURL = ? and rg.sourceSURL_uniqueID = ? " - + "and sg.statusCode = 22 )"; + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID = rq.ID AND sg.request_GetID = rg.ID) " + + "WHERE ( rg.sourceSURL_uniqueID = ? 
and sg.statusCode = 22 )"; stat = con.prepareStatement(query); - stat.setString(1, surl.getSURLString()); - stat.setInt(2, surl.uniqueId()); + stat.setInt(1, surl.uniqueId()); rs = stat.executeQuery(); - return rs.next(); + result = rs.next(); + } catch (SQLException e) { - String msg = String.format("surlHasOngoingPtGs: SQL error: %s", - e.getMessage()); + + String msg = String.format("surlHasOngoingPtGs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } + return result; } public boolean surlHasOngoingPtPs(TSURL surl, TRequestToken ptpRequestToken) { @@ -718,6 +690,7 @@ public boolean surlHasOngoingPtPs(TSURL surl, TRequestToken ptpRequestToken) { ResultSet rs = null; PreparedStatement stat = null; Connection con = null; + boolean result = false; try { @@ -725,36 +698,36 @@ public boolean surlHasOngoingPtPs(TSURL surl, TRequestToken ptpRequestToken) { // We basically check whether there are active requests // that have the SURL in SRM_SPACE_AVAILABLE status String query = "SELECT rq.ID, rp.ID, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE ( rp.targetSURL = ? and rp.targetSURL_uniqueID = ? " - + "and sp.statusCode=24 )"; + + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " + + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " + + "WHERE ( rp.targetSURL_uniqueID = ? and sp.statusCode = 24 )"; if (ptpRequestToken != null) { - query += " AND rq.r_token != ?"; + query += " AND ( rq.r_token != ? 
)"; } stat = con.prepareStatement(query); - stat.setString(1, surl.getSURLString()); - stat.setInt(2, surl.uniqueId()); + stat.setInt(1, surl.uniqueId()); if (ptpRequestToken != null) { - stat.setString(3, ptpRequestToken.getValue()); + stat.setString(2, ptpRequestToken.getValue()); } rs = stat.executeQuery(); - return rs.next(); + result = rs.next(); + } catch (SQLException e) { - String msg = String.format("surlHasOngoingPtPs: SQL error: %s", - e.getMessage()); + + String msg = String.format("surlHasOngoingPtPs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } - + return result; } private void surlSanityChecks(List surls) { diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/StorageSpaceDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/StorageSpaceDAOMySql.java index 8e35b51f9..b8fcbe4d9 100644 --- a/src/main/java/it/grid/storm/persistence/impl/mysql/StorageSpaceDAOMySql.java +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/StorageSpaceDAOMySql.java @@ -17,14 +17,6 @@ package it.grid.storm.persistence.impl.mysql; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.persistence.PersistenceDirector; -import it.grid.storm.persistence.dao.AbstractDAO; -import it.grid.storm.persistence.dao.StorageSpaceDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.StorageSpaceTO; -import it.grid.storm.persistence.util.helper.StorageSpaceSQLHelper; - import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -36,604 +28,586 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** - * find = con.prepareStatement( - * "SELECT storm_get_filereq.rowid, storm_req.r_token, storm_get_filereq.from_surl, storm_get_filereq.lifetime, storm_get_filereq.s_token, storm_get_filereq.flags, 
storm_req.protocol, storm_get_filereq.actual_size, storm_get_filereq.status, storm_get_filereq.errstring, storm_get_filereq.pfn FROM storm_get_filereq, storm_req WHERE storm_get_filereq.r_token=storm_req.r_token AND storm_get_filereq.r_token=?" - * ); - **/ - -public class StorageSpaceDAOMySql extends AbstractDAO implements - StorageSpaceDAO { - - private static final Logger log = LoggerFactory - .getLogger(StorageSpaceDAOMySql.class); - - private StorageSpaceSQLHelper helper; - - /** - * CONSTRUCTOR - */ - public StorageSpaceDAOMySql() { - - helper = new StorageSpaceSQLHelper(PersistenceDirector.getDataBase() - .getDbmsVendor()); - } - - /** - * addStorageSpace - * - * @param ss - * StorageSpace - * @throws DataAccessException - */ - - public void addStorageSpace(StorageSpaceTO ss) throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.insertQuery(conn, ss); - log.info("INSERT query = {}", prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("INSERT result = {}", res); - if (res <= 0) { - log - .error("No row inserted for statement : {}", prepStatement.toString()); - throw new DataAccessException("No rows inserted for Storage Space"); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing INSERT query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * getStorageSpaceById - * - * @param ssId - * Long - * @return StorageSpace - * @throws DataAccessException - */ - public StorageSpaceTO getStorageSpaceById(Long ssId) - throws DataAccessException { - - throw new DataAccessException("getStorageSpaceById: Unimplemented method!"); - } - - public Collection findAll() throws DataAccessException { - - throw new DataAccessException("findAll: Unimplemented method!"); - } - - /** - * Returns a Collection of StorageSpaceTO owned by 'user' and with the - * 
specified alias ('spaceAlias'). 'spaceAlias' can be NULL or empty and in - * these cases a Collection of all the StorageSpaceTO owned by 'user' is - * returned. - * - * @param owner - * VomsGridUser. - * @param spaceAlias - * String. - * @return Collection of StorageSpaceTO. - * @throws DataAccessException - */ - public Collection getStorageSpaceByOwner( - GridUserInterface owner, String spaceAlias) throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectBySpaceAliasQuery(conn, owner, spaceAlias); - log.debug("DB query = {}", prepStatement.toString()); - - res = prepStatement.executeQuery(); - - log.debug("query result = {}", res); - if (res.first() == false) { - log.debug("No rows found for query : {}", prepStatement.toString()); - } else { - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - /** - * Returns a Collection of StorageSpaceTO owned by 'VO'. - * - * @param voname - * Vo. - * @return Collection of StorageSpaceTO. 
- * @throws DataAccessException - */ - - public Collection getStorageSpaceBySpaceType(String stype) - throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - PreparedStatement prepStatement = null; - - Connection conn = getConnection(); - ResultSet res = null; - - try { - prepStatement = helper.selectBySpaceType(conn, stype); - log.debug("DB query = {}", prepStatement.toString()); - - res = prepStatement.executeQuery(); - log.debug("query result = {}", res); - if (res.first() == false) { - log.info("No rows found for query : {}", prepStatement.toString()); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - /** - * Returns a Collection of StorageSpaceTO with the specified alias - * ('spaceAlias'). 'spaceAlias' can not be be NULL or empty. - * - * @param spaceAlias - * String. - * @return Collection of StorageSpaceTO. 
- * @throws DataAccessException - */ - public Collection getStorageSpaceByAliasOnly(String spaceAlias) - throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - Connection conn = getConnection(); - ResultSet res = null; - - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectBySpaceAliasOnlyQuery(conn, spaceAlias); - log.debug("DB query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - log.debug("query result = {}" , res); - - if (res.first() == false) { - log.info("No rows found for query : {}" , prepStatement.toString()); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - /** - * getStorageSpaceByToken - * - * @param token - * TSpaceToken - * @return StorageSpace , null if not row found on that token - * @throws DataAccessException - */ - public StorageSpaceTO getStorageSpaceByToken(String token) - throws DataAccessException { - - StorageSpaceTO ssTO = null; - - Connection conn = getConnection(); - ResultSet res = null; - - PreparedStatement prepStatement = null; - try { - prepStatement = helper.selectByTokenQuery(conn, token); - log.debug("SELECT query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - - log.debug("SELECT result = {}" , res); - if (res.first() == false) { - log.info("No rows found for query : {}" , prepStatement.toString()); - } else { - // take the first - ssTO = helper.makeStorageSpaceTO(res); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing INSERT query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return 
ssTO; - } - - @Override - public Collection getStorageSpaceByUnavailableUsedSpace( - long unavailableSizeValue) throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectByUnavailableUsedSpaceSizeQuery(conn, - unavailableSizeValue); - log.debug("SELECT query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - log.debug("SELECT result = {}" , res); - if (res.first() == false) { - log.info("No rows found for query : {}" , prepStatement.toString()); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing INSERT query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - @Override - public Collection getStorageSpaceByPreviousLastUpdate( - Date lastUpdateTimestamp) throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectByPreviousOrNullLastUpdateQuery(conn, - lastUpdateTimestamp.getTime()); - log.debug("SELECT query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - log.debug("SELECT result = {}" , res); - if (res.first() == false) { - log.info("No rows found for query : {}" , prepStatement.toString()); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing INSERT query", e); - } finally { - 
releaseConnection(res, prepStatement, conn); - } - return result; - } - - /** - * removeStorageSpace - * - * @param ss - * StorageSpace - * @throws DataAccessException - */ - public void removeStorageSpace(GridUserInterface user, String spaceToken) - throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.removeByTokenQuery(conn, user, spaceToken); - log.debug("query = {}" , prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("Number of rows removed: {}" , res); - if (res <= 0) { - log.error("Error removing Storage Space with token = {} for " - + "user {} not found", spaceToken, user.getDn()); - - throw new DataAccessException("Storage Space with token = '" - + spaceToken + "' for user '" + user.getDn() + "' not found!"); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DELETE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * removeStorageSpace only by spaceToken - * - * @param ss - * StorageSpace - * @throws DataAccessException - */ - public void removeStorageSpace(String spaceToken) throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.removeByTokenQuery(conn, spaceToken); - - log.debug("query = {}" , prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("Number of rows removed: {}" , res); - - if (res <= 0) { - log.error("Error removing Storage Space with token = {}. 
Space not found", - spaceToken); - - throw new DataAccessException("Storage Space with token = '" - + spaceToken + "' not found!"); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DELETE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * - * @param ssTO - * StorageSpaceTO - * @throws DataAccessException - */ - public void updateStorageSpace(StorageSpaceTO ssTO) - throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.updateByAliasAndTokenQuery(conn, ssTO); - log.debug("UPDATE query = {}" , prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("UPDATE row count = {}" , res); - - if (res != 1) { - if (res < 1) { - log.error("No storage space rows updated by query : {}" - , prepStatement.toString()); - } else { - log.warn("More than a single storage space rows updated by " - + "query : {}. 
updated {} rows.", - prepStatement.toString(), res); - } - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing UPDATE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * - * @param ssTO - * StorageSpaceTO - * @throws DataAccessException - */ - public void updateStorageSpaceFreeSpace(StorageSpaceTO ssTO) - throws DataAccessException { - - long freeSpace = ssTO.getFreeSize(); - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.updateFreeSpaceByTokenQuery(conn, - ssTO.getSpaceToken(), freeSpace, new Date()); - - log.debug("UPDATE query = {}" , prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("UPDATE row count = {}", res); - if (res <= 0) { - log.error("No storage space rows updated by query : {}" - , prepStatement.toString()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing UPDATE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * - * @param ssTO - * StorageSpaceTO - * @throws DataAccessException - */ - public void updateAllStorageSpace(StorageSpaceTO ssTO) - throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.updateByTokenQuery(conn, ssTO); - log.debug("UPDATE query = {}", prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("UPDATE row count = {}" , res); - if (res != 1) { - if (res < 1) { - log.error("No storage space rows updated by query {}" - , prepStatement.toString()); - } else { - log.warn("More than a single storage space rows updated " - + "by query : {}. 
updated {} rows" - ,prepStatement.toString(), res); - } - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing UPDATE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * Method used to retrieve the set of StorageTO for expired space. - * - * @param long timeInSecond - * @return Collection of transfer object - */ - public Collection getExpired(long currentTimeInSecond) - throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectExpiredQuery(conn, currentTimeInSecond); - log.debug("DB query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - - log.debug("query result = {}" , res); - if (res.first() == false) { - log.debug("No rows found for query : {}" , prepStatement.toString()); - throw new DataAccessException("No storage space expired found at time " - + currentTimeInSecond); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - @Override - public int increaseUsedSpace(String spaceToken, long usedSpaceToAdd) - throws DataAccessException { - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - int n = 0; - - try { - prepStatement = helper.increaseUsedSpaceByTokenQuery(conn, spaceToken, usedSpaceToAdd); - log.debug("DB query = {}" , prepStatement.toString()); - - n = prepStatement.executeUpdate(); - - log.debug("query result = {}" , n); - if (n == 0) { - log.debug("No rows updated for query 
: {}" , prepStatement.toString()); - throw new DataAccessException("No storage space updated!"); - } - - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return n; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.StorageSpaceDAO; +import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.StorageSpaceTO; +import it.grid.storm.persistence.pool.StormBeIsamConnectionPool; +import it.grid.storm.persistence.util.helper.StorageSpaceSQLHelper; + +public class StorageSpaceDAOMySql extends AbstractDAO implements StorageSpaceDAO { + + private static final Logger log = LoggerFactory.getLogger(StorageSpaceDAOMySql.class); + + private static StorageSpaceDAO instance; + + public static synchronized StorageSpaceDAO getInstance() { + if (instance == null) { + instance = new StorageSpaceDAOMySql(); + } + return instance; + } + + private StorageSpaceSQLHelper helper; + + private StorageSpaceDAOMySql() { + super(StormBeIsamConnectionPool.getInstance()); + helper = new StorageSpaceSQLHelper(); + } + + /** + * addStorageSpace + * + * @param ss StorageSpace + */ + public void addStorageSpace(StorageSpaceTO ss) { + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + + con = getConnection(); + ps = helper.insertQuery(con, ss); + + log.debug("INSERT query = {}", ps); + res = ps.executeUpdate(); + log.debug("INSERT result = {}", res); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + + if (res <= 0) { + log.error("No rows inserted for Storage Space: {}", ss.toString()); + } + } + + /** + * getStorageSpaceById + * + * @param ssId Long + * @return StorageSpace + * @throws DataAccessException + */ + public 
StorageSpaceTO getStorageSpaceById(Long ssId) throws DataAccessException { + + throw new DataAccessException("getStorageSpaceById: Unimplemented method!"); + } + + public Collection findAll() throws DataAccessException { + + throw new DataAccessException("findAll: Unimplemented method!"); + } + + /** + * Returns a Collection of StorageSpaceTO owned by 'user' and with the specified alias + * ('spaceAlias'). 'spaceAlias' can be NULL or empty and in these cases a Collection of all the + * StorageSpaceTO owned by 'user' is returned. + * + * @param owner VomsGridUser. + * @param spaceAlias String. + * @return Collection of StorageSpaceTO. + */ + public Collection getStorageSpaceByOwner(GridUserInterface owner, + String spaceAlias) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectBySpaceAliasQuery(con, owner, spaceAlias); + + log.debug("DB query = {}", ps); + res = ps.executeQuery(); + log.debug("query result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.debug("No rows found for query : {}", ps); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); } - - @Override - public int decreaseUsedSpace(String spaceToken, long usedSpaceToRemove) - throws DataAccessException { - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - int n = 0; - - try { - prepStatement = helper.decreaseUsedSpaceByTokenQuery(conn, spaceToken, usedSpaceToRemove); - log.debug("DB query = {}" , prepStatement.toString()); - - n = prepStatement.executeUpdate(); - - log.debug("query result = {}" , n); - if (n == 0) { - log.debug("No rows updated for query : {}" , prepStatement.toString()); - throw new 
DataAccessException("No storage space updated!"); - } - - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return n; + return result; + } + + /** + * Returns a Collection of StorageSpaceTO owned by 'VO'. + * + * @param stype. + * @return Collection of StorageSpaceTO. + */ + + public Collection getStorageSpaceBySpaceType(String stype) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectBySpaceType(con, stype); + + log.debug("DB query = {}", ps); + res = ps.executeQuery(); + log.debug("query result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.info("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); } + return result; + } + + /** + * Returns a Collection of StorageSpaceTO with the specified alias ('spaceAlias'). 'spaceAlias' + * can not be be NULL or empty. + * + * @param spaceAlias String. + * @return Collection of StorageSpaceTO. 
+ */ + public Collection getStorageSpaceByAliasOnly(String spaceAlias) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectBySpaceAliasOnlyQuery(con, spaceAlias); + + log.debug("DB query = {}", ps); + res = ps.executeQuery(); + log.debug("query result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.info("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return result; + } + + /** + * getStorageSpaceByToken + * + * @param token TSpaceToken + * @return StorageSpace , null if not row found on that token + */ + public StorageSpaceTO getStorageSpaceByToken(String token) { + + StorageSpaceTO ssTO = null; + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectByTokenQuery(con, token); + + log.debug("SELECT query = {}", ps); + res = ps.executeQuery(); + log.debug("SELECT result = {}", res); + + if (res.first()) { + ssTO = helper.makeStorageSpaceTO(res); + } else { + log.info("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return ssTO; + } + + @Override + public Collection getStorageSpaceByUnavailableUsedSpace( + long unavailableSizeValue) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectByUnavailableUsedSpaceSizeQuery(con, unavailableSizeValue); + + log.debug("SELECT query = {}", ps); + res = ps.executeQuery(); + 
log.debug("SELECT result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.debug("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return result; + } + + @Override + public Collection getStorageSpaceByPreviousLastUpdate(Date lastUpdateTimestamp) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectByPreviousOrNullLastUpdateQuery(con, lastUpdateTimestamp.getTime()); + + log.debug("SELECT query = {}", ps); + res = ps.executeQuery(); + log.debug("SELECT result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.info("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return result; + } + + /** + * removeStorageSpace + * + * @param ss StorageSpace + */ + public void removeStorageSpace(GridUserInterface user, String spaceToken) { + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + con = getConnection(); + ps = helper.removeByTokenQuery(con, user, spaceToken); + log.debug("query = {}", ps); + + res = ps.executeUpdate(); + log.debug("Number of rows removed: {}", res); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * removeStorageSpace only by spaceToken + * + * @param ss StorageSpace + * @throws DataAccessException + */ + public void removeStorageSpace(String spaceToken) throws DataAccessException { + + Connection con = null; + PreparedStatement ps = null; + 
int res = 0; + + try { + con = getConnection(); + ps = helper.removeByTokenQuery(con, spaceToken); + + log.debug("query = {}", ps); + res = ps.executeUpdate(); + log.debug("Number of rows removed: {}", res); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * + * @param ssTO StorageSpaceTO + */ + public void updateStorageSpace(StorageSpaceTO ssTO) { + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + con = getConnection(); + ps = helper.updateByAliasAndTokenQuery(con, ssTO); + + log.debug("UPDATE query = {}", ps); + res = ps.executeUpdate(); + log.debug("UPDATE row count = {}", res); + + if (res == 0) { + log.warn("No storage space rows updated by query : {}", ps); + } + if (res > 1) { + log.warn( + "More than a single storage space rows updated by " + "query : {}. updated {} rows.", + ps, res); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + + } + + /** + * + * @param ssTO StorageSpaceTO + * @throws DataAccessException + */ + public void updateStorageSpaceFreeSpace(StorageSpaceTO ssTO) throws DataAccessException { + + long freeSpace = ssTO.getFreeSize(); + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + + con = getConnection(); + ps = helper.updateFreeSpaceByTokenQuery(con, ssTO.getSpaceToken(), freeSpace, new Date()); + + log.debug("UPDATE query = {}", ps); + res = ps.executeUpdate(); + log.debug("UPDATE row count = {}", res); + + if (res <= 0) { + log.warn("No storage space rows updated by query : {}", ps); + } + } catch (SQLException e) { + log.error(e.getMessage(), e); + throw new DataAccessException("Error while executing UPDATE query", e); + } finally { + closeStatement(ps); + closeConnection(con); } + } + + /** + * + * @param ssTO StorageSpaceTO + */ + public void updateAllStorageSpace(StorageSpaceTO ssTO) { + + Connection con = null; 
+ PreparedStatement ps = null; + int res = 0; + + try { + con = getConnection(); + ps = helper.updateByTokenQuery(con, ssTO); + + log.debug("UPDATE query = {}", ps); + res = ps.executeUpdate(); + log.debug("UPDATE row count = {}", res); + + if (res == 0) { + log.warn("No storage space rows updated by query {}", ps); + } + if (res > 1) { + log.warn( + "More than a single storage space rows updated " + "by query : {}. updated {} rows", ps, + res); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); } + } + + /** + * Method used to retrieve the set of StorageTO for expired space. + * + * @param long timeInSecond + * @return Collection of transfer object + */ + public Collection getExpired(long currentTimeInSecond) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = helper.selectExpiredQuery(con, currentTimeInSecond); + + log.debug("DB query = {}", ps); + res = ps.executeQuery(); + log.debug("query result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.debug("No rows found for query : {}", ps); + log.debug("No storage space expired found at time " + currentTimeInSecond); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + + return result; + } + + @Override + public int increaseUsedSpace(String spaceToken, long usedSpaceToAdd) throws DataAccessException { + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + int n = 0; + + try { + + con = getConnection(); + ps = helper.increaseUsedSpaceByTokenQuery(con, spaceToken, usedSpaceToAdd); + + log.debug("DB query = {}", ps); + n = ps.executeUpdate(); + log.debug("query result = {}", n); + + if (n == 0) { 
+ log.debug("No storage space updated!"); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + + return n; + } + + @Override + public int decreaseUsedSpace(String spaceToken, long usedSpaceToRemove) + throws DataAccessException { + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + int n = 0; + + try { + + con = getConnection(); + ps = helper.decreaseUsedSpaceByTokenQuery(con, spaceToken, usedSpaceToRemove); + + log.debug("DB query = {}", ps); + n = ps.executeUpdate(); + log.debug("query result = {}", n); + + if (n == 0) { + log.debug("No storage space updated!"); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + + return n; + } } diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/TapeRecallDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/TapeRecallDAOMySql.java index bafab8df9..c4f3d744e 100644 --- a/src/main/java/it/grid/storm/persistence/impl/mysql/TapeRecallDAOMySql.java +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/TapeRecallDAOMySql.java @@ -18,747 +18,707 @@ package it.grid.storm.persistence.impl.mysql; import static it.grid.storm.persistence.model.TapeRecallTO.RecallTaskType.valueOf; - -import com.google.common.collect.Lists; - -import it.grid.storm.persistence.PersistenceDirector; -import it.grid.storm.persistence.dao.TapeRecallDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.TapeRecallTO; -import it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper; -import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.tape.recalltable.model.TapeRecallStatus; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_DATE; +import static 
it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_DEFERRED_STARTTIME; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_FILE_NAME; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_GROUP_TASK_ID; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_PIN_LIFETIME; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_REQUEST_TYPE; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_RETRY_ATTEMPT; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_STATUS; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_TASK_ID; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_USER_ID; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_VO_NAME; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; -import java.sql.Statement; import java.sql.Timestamp; import java.util.Calendar; import java.util.Date; import java.util.GregorianCalendar; import java.util.List; +import java.util.Optional; import java.util.UUID; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class TapeRecallDAOMySql extends TapeRecallDAO { +import com.google.common.collect.Lists; + +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.TapeRecallDAO; +import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.TapeRecallTO; +import it.grid.storm.persistence.pool.StormBeIsamConnectionPool; +import it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper; +import 
it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.tape.recalltable.model.TapeRecallStatus; + +public class TapeRecallDAOMySql extends AbstractDAO implements TapeRecallDAO { + + private static final Logger log = LoggerFactory.getLogger(TapeRecallDAOMySql.class); + + private static TapeRecallDAO instance; + + public static synchronized TapeRecallDAO getInstance() { + if (instance == null) { + instance = new TapeRecallDAOMySql(); + } + return instance; + } + + private final TapeRecallMySQLHelper sqlHelper; + + private TapeRecallDAOMySql() { + + super(StormBeIsamConnectionPool.getInstance()); + sqlHelper = new TapeRecallMySQLHelper(); + } + + @Override + public int getNumberInProgress() throws DataAccessException { + + return getNumberInProgress(null); + } + + @Override + public int getNumberInProgress(String voName) throws DataAccessException { - private static final Logger log = LoggerFactory - .getLogger(TapeRecallDAOMySql.class); - - private final TapeRecallMySQLHelper sqlHelper; - - public TapeRecallDAOMySql() { - - sqlHelper = new TapeRecallMySQLHelper(PersistenceDirector.getDataBase() - .getDbmsVendor()); - } - - @Override - public int getNumberInProgress() throws DataAccessException { - - return getNumberInProgress(null); - } - - @Override - public int getNumberInProgress(String voName) throws DataAccessException { - - Connection dbConnection = getConnection(); - int status = 0; - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - if (voName == null) { - prepStatement = sqlHelper.getQueryNumberInProgress(dbConnection); - } else { - prepStatement = sqlHelper - .getQueryNumberInProgress(dbConnection, voName); - } - - log.debug("QUERY: {}", prepStatement); - - res = prepStatement.executeQuery(); - - if (res.first()) { - status = res.getInt(1); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - } finally { - releaseConnection(res, 
prepStatement, dbConnection); - } - return status; - } - - @Override - public int getNumberQueued() throws DataAccessException { - - return getNumberQueued(null); - } - - @Override - public int getNumberQueued(String voName) throws DataAccessException { - - Connection dbConnection = getConnection(); - int status = 0; - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - if (voName == null) { - prepStatement = sqlHelper.getQueryNumberQueued(dbConnection); - } else { - prepStatement = sqlHelper.getQueryNumberQueued(dbConnection, voName); - } - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - - if (res.first()) { - status = res.getInt(1); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return status; - } - - @Override - public int getReadyForTakeOver() throws DataAccessException { - - return getReadyForTakeOver(null); - } - - @Override - public int getReadyForTakeOver(String voName) throws DataAccessException { - - Connection dbConnection = getConnection(); - int status = 0; - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - if (voName == null) { - prepStatement = sqlHelper.getQueryReadyForTakeOver(dbConnection); - } else { - prepStatement = sqlHelper - .getQueryReadyForTakeOver(dbConnection, voName); - } - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - - if (res.first()) { - status = res.getInt(1); - } - } catch (SQLException e) { - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return status; - } - - @Override - public List getGroupTasks(UUID groupTaskId) - throws DataAccessException { - - TapeRecallTO task = null; - List taskList = Lists.newArrayList(); - - Connection dbConnection = getConnection(); - ResultSet res = null; - 
PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper - .getQueryGetGroupTasks(dbConnection, groupTaskId); - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - - if (!res.first()) { - log.error("No tasks with GroupTaskId='{}'", groupTaskId); - throw new DataAccessException( - "No recall table row retrieved executing query: '" - + prepStatement + "'"); - } - do { - task = new TapeRecallTO(); - setTaskInfo(task, res); - taskList.add(task); - } while (res.next()); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return taskList; - } - - @Override - public boolean existsGroupTask(UUID groupTaskId) throws DataAccessException { - - boolean response = false; - - Connection dbConnection = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper - .getQueryGetGroupTasks(dbConnection, groupTaskId); - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - response = res.first(); - if (!response) { - log.info("No tasks found with GroupTaskId='{}'",groupTaskId); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return response; - } - - @Override - public TapeRecallTO getTask(UUID taskId, String requestToken) - throws DataAccessException { - - TapeRecallTO task; - Connection dbConnection = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper.getQueryGetTask(dbConnection, taskId, - requestToken); - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - - if (!res.first()) { - log.error("No task 
found for requestToken={} taskId={}. Query={}", requestToken, taskId, prepStatement); - - throw new DataAccessException("No task found for requestToken=" - + requestToken + " " + "taskId=" + taskId + ". Query = " - + prepStatement); - } - task = new TapeRecallTO(); - setTaskInfo(task, res); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return task; - } - - @Override - public boolean existsTask(UUID taskId, String requestToken) - throws DataAccessException { - - boolean response; - - Connection dbConnection = getConnection(); - ResultSet res = null; - - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper.getQueryGetTask(dbConnection, taskId, - requestToken); - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - response = res.first(); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return response; - } - - @Override - public UUID insertCloneTask(TapeRecallTO task, int[] statuses, - UUID proposedGroupTaskId) throws DataAccessException { - - if (task.getTaskId() == null || task.getRequestToken() == null - || task.getRequestToken().getValue().trim().isEmpty()) { - log - .error("received Task insert request with empty primary key field TaskId or RequestToken. TaskId = {}, request token = {}", task.getTaskId(), task.getRequestToken()); - throw new DataAccessException( - "Unable to create insert the task wth the provided UUID and " - + "request token using UUID-namebased algorithm. 
TaskId = " - + task.getTaskId() + " , request token = " + task.getRequestToken()); - } - Integer status = task.getStatusId(); - - Connection dbConnection = getConnection(); - PreparedStatement prepStat = null; - - try { - dbConnection.setAutoCommit(false); - } catch (SQLException e) { - log.error("Error setting autocommit to false! {}", e.getMessage()); - throw new DataAccessException("Error setting autocommit to false! " - + e.getMessage(), e); - } - - ResultSet res = null; - try { - - if (statuses == null || statuses.length == 0) { - prepStat = sqlHelper.getQueryGetGroupTaskIds(dbConnection, - task.getTaskId()); - } else { - prepStat = sqlHelper.getQueryGetGroupTaskIds(dbConnection, - task.getTaskId(), statuses); - } - log.debug("QUERY: {}", prepStat); - - res = prepStat.executeQuery(); - - if (res.first()) { - /* Take the first, but there can be more than one result */ - String uuidString = res - .getString(TapeRecallMySQLHelper.COL_GROUP_TASK_ID); - status = Integer.valueOf(res.getInt(TapeRecallMySQLHelper.COL_STATUS)); - task.setStatusId(status.intValue()); - task.setGroupTaskId(UUID.fromString(uuidString)); - Calendar calendar = new GregorianCalendar(); - try { - task.forceStatusUpdateInstants( - res.getDate(TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE, calendar), - res.getDate(TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE, calendar)); - } catch (IllegalArgumentException e) { - log.error("Unable to set status update timestamps on the coned task"); - } - } else { - log.debug("No task found for taskId={} Creating a new group entry", task.getTaskId()); - task.setGroupTaskId(proposedGroupTaskId); - task.setStatusId(status.intValue()); - } - - prepStat = sqlHelper.getQueryInsertTask(dbConnection, task); - if (prepStat == null) { - // this case is possible if and only if the task is null or empty - log.error("Cannot create the query because the task is null or empty."); - throw new DataAccessException( - "Cannot create the query because the task is null or empty."); - 
} - try { - log.debug("Query(insert-task)={}", prepStat); - prepStat.executeUpdate(); - commit(dbConnection); - } catch (SQLException e) { - rollback(dbConnection); - throw new DataAccessException("Error executing query : " - + prepStat + " ; " + e.getMessage(), e); - } - } catch (SQLException e) { - rollback(dbConnection); - throw new DataAccessException("Error executing query : " + " ; " - + e.getMessage(), e); - } finally { - releaseConnection(new ResultSet[] { res }, new Statement[] { prepStat }, - dbConnection); - } - return task.getGroupTaskId(); - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.persistence.dao.TapeRecallDAO#purgeCompletedTasks(int) - */ - @Override - public int purgeCompletedTasks(long expirationTime, int numTasks) throws DataAccessException { - - PreparedStatement ps = null; - Connection con = getConnection(); - - int count = 0; - boolean hasLimit = numTasks > 0; - try { - if (hasLimit) { - ps = sqlHelper.getQueryDeleteCompletedTasks(con, expirationTime, numTasks); - } else { - ps = sqlHelper.getQueryDeleteCompletedTasks(con, expirationTime); - } - - count = ps.executeUpdate(); - - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " + ps, e); - } finally { - releaseConnection(null, ps, con); - } - - return count; - } - - @Override - public void setGroupTaskRetryValue(UUID groupTaskId, int value) - throws DataAccessException { - - Connection dbConnection = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper.getQuerySetGroupTaskRetryValue(dbConnection, - groupTaskId, value); - - prepStatement.executeUpdate(); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " - + prepStatement, e); - } finally { - releaseConnection(null, prepStatement, dbConnection); - } - - } - - @Override - public TapeRecallTO takeoverTask() throws DataAccessException { - - return takeoverTask(null); - } - - @Override - public TapeRecallTO 
takeoverTask(String voName) throws DataAccessException { - - List taskList = takeoverTasksWithDoubles(1, voName); - - if (taskList.isEmpty()) { - return null; - } - return taskList.get(0); - } - - @Override - public List takeoverTasksWithDoubles(int numberOfTaks) - throws DataAccessException { - - return takeoverTasksWithDoubles(numberOfTaks, null); - } - - @Override - public List takeoverTasksWithDoubles(int numberOfTaks, - String voName) throws DataAccessException { - - Connection dbConnection = getConnection(); - - List taskList = Lists.newLinkedList(); - TapeRecallTO task = null; - ResultSet res = null; - - PreparedStatement prepStatement = null; - - try { - dbConnection.setAutoCommit(false); - } catch (SQLException e) { - log.error("Error setting autocommit to false! {}", e.getMessage()); - throw new DataAccessException("Error setting autocommit to false! " - + e.getMessage(), e); - } - try { - if (voName == null) { - prepStatement = sqlHelper.getQueryGetTakeoverTasksWithDoubles( - dbConnection, numberOfTaks); - } else { - prepStatement = sqlHelper.getQueryGetTakeoverTasksWithDoubles( - dbConnection, numberOfTaks, voName); - } - // start transaction - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - if (!res.first()) { - log.info("No tape recall rows ready for takeover"); - return taskList; - } - do { - task = new TapeRecallTO(); - setTaskInfo(task, res); - task.setStatus(TapeRecallStatus.IN_PROGRESS); - taskList.add(task); - } while (res.next()); - if (!taskList.isEmpty()) { - try { - prepStatement = sqlHelper.getQueryUpdateTasksStatus(dbConnection, - taskList, TapeRecallStatus.IN_PROGRESS.getStatusId(), - TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE, new Date()); - } catch (IllegalArgumentException e) { - log - .error("Unable to obtain the query to update task status and set status transition timestamp. 
IllegalArgumentException: " - + e.getMessage()); - throw new DataAccessException( - "Unable to obtain the query to update task status and set status transition timestamp"); - } - prepStatement.executeUpdate(); - } - commit(dbConnection); - } catch (SQLException e) { - rollback(dbConnection); - throw new DataAccessException("Error executing query: " - + prepStatement, e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return taskList; - } - - @Override - public List getAllInProgressTasks(int numberOfTaks) - throws DataAccessException { - - Connection dbConnection = getConnection(); - ResultSet res = null; - List taskList = Lists.newArrayList(); - - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper.getQueryGetAllTasksInProgress(dbConnection, - numberOfTaks); - - log.debug("getAllInProgressTasks query: {}", prepStatement); - - res = prepStatement.executeQuery(); - - boolean emptyResultSet = true; - - while (res.next()) { - - emptyResultSet = false; - TapeRecallTO task = new TapeRecallTO(); - setTaskInfo(task, res); - taskList.add(task); - } - - if (emptyResultSet) { - - log.debug("No in progress recall tasks found."); - } - - } catch (Exception e) { - - log.error("Error executing query: {}", prepStatement, e); - throw new DataAccessException("Error executing query: " - + prepStatement, e); - - } finally { - - releaseConnection(res, prepStatement, dbConnection); - } - - return taskList; - } - - private void setTaskInfo(TapeRecallTO task, ResultSet res) - throws DataAccessException { - - if (res == null) { - throw new DataAccessException("Unable to build Task from NULL ResultSet"); - } - - String requestTokenStr = null; - Timestamp insertionInstant; - try { - requestTokenStr = res.getString(TapeRecallMySQLHelper.COL_REQUEST_TOKEN); - insertionInstant = res.getTimestamp(TapeRecallMySQLHelper.COL_DATE); - - } catch (SQLException e) { - throw new DataAccessException( - "Unable to retrieve RequestToken String from 
ResultSet. " + e); - } - try { - task - .setRequestToken(new TRequestToken(requestTokenStr, insertionInstant)); - } catch (InvalidTRequestTokenAttributesException e) { - throw new DataAccessException( - "Unable to build TRequestToken from token='" + requestTokenStr + "'. " - + e); - } - - UUID groupTaskId = null; - String groupTaskIdStr = null; - try { - groupTaskIdStr = res.getString(TapeRecallMySQLHelper.COL_GROUP_TASK_ID); - if (groupTaskIdStr != null) { - try { - groupTaskId = UUID.fromString(groupTaskIdStr); - task.setGroupTaskId(groupTaskId); - } catch (IllegalArgumentException iae) { - throw new DataAccessException( - "Unable to build UUID from GroupTaskId='" + groupTaskId + "'. " - + iae); - } - } - } catch (SQLException e) { - throw new DataAccessException( - "Unable to retrieve GroupTaskId String from ResultSet. " + e); - } - - // do not set the task ID, it is produced by the setFilename call - - try { - - task.setRequestType(valueOf(res.getString(TapeRecallMySQLHelper.COL_REQUEST_TYPE))); - task.setFileName(res.getString(TapeRecallMySQLHelper.COL_FILE_NAME)); - task.setPinLifetime(res.getInt(TapeRecallMySQLHelper.COL_PIN_LIFETIME)); - task.setStatusId(res.getInt(TapeRecallMySQLHelper.COL_STATUS)); - task.setVoName(res.getString(TapeRecallMySQLHelper.COL_VO_NAME)); - task.setUserID(res.getString(TapeRecallMySQLHelper.COL_USER_ID)); - task.setRetryAttempt(res.getInt(TapeRecallMySQLHelper.COL_RETRY_ATTEMPT)); - Calendar calendar = new GregorianCalendar(); - task.setDeferredRecallInstant(res.getTimestamp( - TapeRecallMySQLHelper.COL_DEFERRED_STARTTIME, calendar)); - task.setInsertionInstant(res.getTimestamp(TapeRecallMySQLHelper.COL_DATE, - calendar)); - try { - task.forceStatusUpdateInstants(res.getTimestamp( - TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE, calendar), res - .getTimestamp(TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE, calendar)); - } catch (IllegalArgumentException e) { - log.error("Unable to set status update timestamps on the coned task"); - } 
- } catch (SQLException e) { - throw new DataAccessException("Unable to getting info from ResultSet. " - + e); - } - } - - @Override - public boolean setGroupTaskStatus(UUID groupTaskId, int newStatusId, - Date timestamp) throws DataAccessException { - - PreparedStatement prepStatement = null; - Connection dbConnection = getConnection(); - - try { - dbConnection.setAutoCommit(false); - } catch (SQLException e) { - log.error("Error setting autocommit to false! {}", e.getMessage()); - throw new DataAccessException("Error setting autocommit to false! " - + e.getMessage(), e); - } - - ResultSet res = null; - boolean ret = false; - int oldStatusId = -1; - - try { - - try { - prepStatement = sqlHelper.getQueryGetGroupTasks(dbConnection, - groupTaskId); - - log.debug("QUERY: {}", prepStatement); - // retrieves the tasks of this task group - res = prepStatement.executeQuery(); - - if (!res.first()) { - log.error("No tasks with GroupTaskId='{}'", groupTaskId); - throw new DataAccessException( - "No recall table row retrieved executing query: '" - + prepStatement + "'"); - } - // verify if their stored status is equal for all - oldStatusId = res.getInt(TapeRecallMySQLHelper.COL_STATUS); - do { - int currentStatusId = res.getInt(TapeRecallMySQLHelper.COL_STATUS); - if (currentStatusId != oldStatusId) { - log.warn("The tasks with groupTaskId {} have different statuses: {} from task {} differs " - + "from expected {}", groupTaskId, currentStatusId, - res.getString(TapeRecallMySQLHelper.COL_TASK_ID), oldStatusId); - break; - } - oldStatusId = currentStatusId; - } while (res.next()); - } catch (SQLException e) { - log - .error("Unable to retrieve groupTaskId related tasks. SQLException: {}", e); - throw new DataAccessException( - "Unable to retrieve groupTaskId related tasks. 
"); - } - if (oldStatusId != newStatusId) { - // update the task status and if is a valid transition set the relative - // transition timestamp - if (!TapeRecallStatus.getRecallTaskStatus(oldStatusId).precedes( - newStatusId)) { - log - .warn("Requested the update of the status of a recall task group to status {} that is precedent " - + "to the recorded status performing the request the same...", newStatusId, oldStatusId); - } - String timestampColumn = null; - if (TapeRecallStatus.isFinalStatus(newStatusId)) { - timestampColumn = TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE; - } else { - if (TapeRecallStatus.IN_PROGRESS.equals(TapeRecallStatus - .getRecallTaskStatus(newStatusId))) { - timestampColumn = TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE; - } else { - log - .warn("unable to determine the status update timestamp column to use given the new statusId '{}'", newStatusId); - } - } - if (timestampColumn != null) { - try { - prepStatement = sqlHelper.getQueryUpdateGroupTaskStatus( - dbConnection, groupTaskId, newStatusId, timestampColumn, - timestamp); - } catch (IllegalArgumentException e) { - log - .error("Unable to obtain the query to update task status and set status transition timestamp. 
IllegalArgumentException: {}", e.getMessage()); - throw new DataAccessException( - "Unable to obtain the query to update task status and set status transition timestamp"); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " - + prepStatement.toString(), e); - } - } else { - try { - prepStatement = sqlHelper.getQuerySetGroupTaskStatus(dbConnection, - groupTaskId, newStatusId); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " - + prepStatement.toString(), e); - } - } - try { - if (prepStatement.executeUpdate() > 0) { - ret = true; - } - commit(dbConnection); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " - + prepStatement.toString(), e); - } - } else { - log - .warn("Skipping the status upadate operation, the status already stored is equal to the new one provided"); - } - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return ret; - } + Connection con = null; + int status = 0; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + if (voName == null) { + ps = sqlHelper.getQueryNumberInProgress(con); + } else { + ps = sqlHelper.getQueryNumberInProgress(con, voName); + } + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + status = res.getInt(1); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return status; + } + + @Override + public int getNumberQueued() throws DataAccessException { + + return getNumberQueued(null); + } + + @Override + public int getNumberQueued(String voName) throws DataAccessException { + + Connection con = null; + int status = 0; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + + if (voName == null) { + ps = sqlHelper.getQueryNumberQueued(con); + } else { + ps = sqlHelper.getQueryNumberQueued(con, voName); 
+ } + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + status = res.getInt(1); + } + } catch (SQLException e) { + log.error(e.getMessage(), e); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return status; + } + + @Override + public int getReadyForTakeOver() throws DataAccessException { + + return getReadyForTakeOver(null); + } + + @Override + public int getReadyForTakeOver(String voName) throws DataAccessException { + + Connection con = null; + int status = 0; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + + if (voName == null) { + ps = sqlHelper.getQueryReadyForTakeOver(con); + } else { + ps = sqlHelper.getQueryReadyForTakeOver(con, voName); + } + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + status = res.getInt(1); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return status; + } + + @Override + public List getGroupTasks(UUID groupTaskId) throws DataAccessException { + + TapeRecallTO task = null; + List taskList = Lists.newArrayList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = sqlHelper.getQueryGetGroupTasks(con, groupTaskId); + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + do { + task = new TapeRecallTO(); + setTaskInfo(task, res); + taskList.add(task); + } while (res.next()); + } else { + log.info("No tasks with GroupTaskId='{}'", groupTaskId); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return taskList; + } + + @Override + public boolean existsGroupTask(UUID groupTaskId) throws DataAccessException { + + boolean response = false; + + Connection con = null; + ResultSet res = null; + 
PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = sqlHelper.getQueryGetGroupTasks(con, groupTaskId); + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + response = res.first(); + if (!response) { + log.info("No tasks found with GroupTaskId='{}'", groupTaskId); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return response; + } + + @Override + public Optional getTask(UUID taskId, String requestToken) + throws DataAccessException { + + TapeRecallTO task = null; + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = sqlHelper.getQueryGetTask(con, taskId, requestToken); + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + task = new TapeRecallTO(); + setTaskInfo(task, res); + } else { + log.info("No task found for requestToken={} taskId={}. Query={}", requestToken, taskId, ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return Optional.ofNullable(task); + } + + @Override + public boolean existsTask(UUID taskId, String requestToken) throws DataAccessException { + + boolean response = false; + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = sqlHelper.getQueryGetTask(con, taskId, requestToken); + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + response = res.first(); + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return response; + } + + @Override + public UUID insertCloneTask(TapeRecallTO task, int[] statuses, UUID proposedGroupTaskId) + throws DataAccessException { + + if (task.getTaskId() == null || task.getRequestToken() == null + || 
task.getRequestToken().getValue().trim().isEmpty()) { + log.error( + "received Task insert request with empty primary key field TaskId or RequestToken. TaskId = {}, request token = {}", + task.getTaskId(), task.getRequestToken()); + throw new DataAccessException("Unable to create insert the task with the provided UUID and " + + "request token using UUID-namebased algorithm. TaskId = " + task.getTaskId() + + " , request token = " + task.getRequestToken()); + } + int status = task.getStatusId(); + + Connection con = null; + PreparedStatement ps = null; + ResultSet res = null; + + try { + + con = getConnection(); + + if (statuses == null || statuses.length == 0) { + ps = sqlHelper.getQueryGetGroupTaskIds(con, task.getTaskId()); + } else { + ps = sqlHelper.getQueryGetGroupTaskIds(con, task.getTaskId(), statuses); + } + log.debug("QUERY: {}", ps); + + res = ps.executeQuery(); + + if (res.first()) { + /* Take the first, but there can be more than one result */ + String uuidString = res.getString(COL_GROUP_TASK_ID); + status = res.getInt(COL_STATUS); + task.setStatusId(status); + task.setGroupTaskId(UUID.fromString(uuidString)); + Calendar calendar = new GregorianCalendar(); + try { + task.forceStatusUpdateInstants( + res.getDate(TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE, calendar), + res.getDate(TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE, calendar)); + } catch (IllegalArgumentException e) { + log.error("Unable to set status update timestamps on the coned task"); + } + } else { + log.debug("No task found for taskId={} Creating a new group entry", task.getTaskId()); + task.setGroupTaskId(proposedGroupTaskId); + task.setStatusId(status); + } + + ps = sqlHelper.getQueryInsertTask(con, task); + if (ps == null) { + // this case is possible if and only if the task is null or empty + log.error("Cannot create the query because the task is null or empty."); + throw new DataAccessException("Cannot create the query because the task is null or empty."); + } + 
log.debug("Query(insert-task)={}", ps); + int n = ps.executeUpdate(); + log.debug("Query(insert-task)={} exited with {}", ps, n); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return task.getGroupTaskId(); + } + + @Override + public int purgeCompletedTasks(long expirationTime, int numTasks) throws DataAccessException { + + PreparedStatement ps = null; + Connection con = null; + int count = 0; + boolean hasLimit = numTasks > 0; + + try { + + con = getConnection(); + if (hasLimit) { + ps = sqlHelper.getQueryDeleteCompletedTasks(con, expirationTime, numTasks); + } else { + ps = sqlHelper.getQueryDeleteCompletedTasks(con, expirationTime); + } + + count = ps.executeUpdate(); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + + return count; + } + + @Override + public void setGroupTaskRetryValue(UUID groupTaskId, int value) throws DataAccessException { + + Connection con = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = sqlHelper.getQuerySetGroupTaskRetryValue(con, groupTaskId, value); + ps.executeUpdate(); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + + } + + @Override + public TapeRecallTO takeoverTask() throws DataAccessException { + + return takeoverTask(null); + } + + @Override + public TapeRecallTO takeoverTask(String voName) throws DataAccessException { + + List taskList = takeoverTasksWithDoubles(1, voName); + + if (taskList.isEmpty()) { + return null; + } + return taskList.get(0); + } + + @Override + public List takeoverTasksWithDoubles(int numberOfTaks) throws DataAccessException { + + return takeoverTasksWithDoubles(numberOfTaks, null); + } + + @Override + public List takeoverTasksWithDoubles(int numberOfTaks, String voName) + throws DataAccessException { + + List taskList = 
Lists.newLinkedList(); + TapeRecallTO task = null; + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + + if (voName == null) { + ps = sqlHelper.getQueryGetTakeoverTasksWithDoubles(con, numberOfTaks); + } else { + ps = sqlHelper.getQueryGetTakeoverTasksWithDoubles(con, numberOfTaks, voName); + } + + // start transaction + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + if (res.first()) { + do { + task = new TapeRecallTO(); + setTaskInfo(task, res); + task.setStatus(TapeRecallStatus.IN_PROGRESS); + taskList.add(task); + } while (res.next()); + if (!taskList.isEmpty()) { + try { + ps = sqlHelper.getQueryUpdateTasksStatus(con, taskList, + TapeRecallStatus.IN_PROGRESS.getStatusId(), COL_IN_PROGRESS_DATE, new Date()); + } catch (IllegalArgumentException e) { + log.error( + "Unable to obtain the query to update task status and set status transition timestamp. IllegalArgumentException: " + + e.getMessage()); + throw new DataAccessException( + "Unable to obtain the query to update task status and set status transition timestamp"); + } + ps.executeUpdate(); + } + } else { + log.info("No tape recall rows ready for takeover"); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return taskList; + } + + @Override + public List getAllInProgressTasks(int numberOfTaks) throws DataAccessException { + + List taskList = Lists.newArrayList(); + + Connection con = null; + PreparedStatement ps = null; + ResultSet res = null; + + try { + con = getConnection(); + ps = sqlHelper.getQueryGetAllTasksInProgress(con, numberOfTaks); + + log.debug("getAllInProgressTasks query: {}", ps); + + res = ps.executeQuery(); + + boolean emptyResultSet = true; + + while (res.next()) { + + emptyResultSet = false; + TapeRecallTO task = new TapeRecallTO(); + setTaskInfo(task, res); + taskList.add(task); + } + + if (emptyResultSet) { + + 
log.debug("No in progress recall tasks found."); + } + + } catch (SQLException e) { + + e.printStackTrace(); + + } finally { + + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + + return taskList; + } + + private void setTaskInfo(TapeRecallTO task, ResultSet res) throws DataAccessException { + + if (res == null) { + throw new DataAccessException("Unable to build Task from NULL ResultSet"); + } + + String requestTokenStr = null; + Timestamp insertionInstant; + try { + requestTokenStr = res.getString(TapeRecallMySQLHelper.COL_REQUEST_TOKEN); + insertionInstant = res.getTimestamp(TapeRecallMySQLHelper.COL_DATE); + + } catch (SQLException e) { + throw new DataAccessException("Unable to retrieve RequestToken String from ResultSet. " + e); + } + try { + task.setRequestToken(new TRequestToken(requestTokenStr, insertionInstant)); + } catch (InvalidTRequestTokenAttributesException e) { + throw new DataAccessException( + "Unable to build TRequestToken from token='" + requestTokenStr + "'. " + e); + } + + UUID groupTaskId = null; + String groupTaskIdStr = null; + try { + groupTaskIdStr = res.getString(TapeRecallMySQLHelper.COL_GROUP_TASK_ID); + if (groupTaskIdStr != null) { + try { + groupTaskId = UUID.fromString(groupTaskIdStr); + task.setGroupTaskId(groupTaskId); + } catch (IllegalArgumentException iae) { + throw new DataAccessException( + "Unable to build UUID from GroupTaskId='" + groupTaskId + "'. " + iae); + } + } + } catch (SQLException e) { + throw new DataAccessException("Unable to retrieve GroupTaskId String from ResultSet. 
" + e); + } + + // do not set the task ID, it is produced by the setFilename call + + try { + + task.setRequestType(valueOf(res.getString(COL_REQUEST_TYPE))); + task.setFileName(res.getString(COL_FILE_NAME)); + task.setPinLifetime(res.getInt(COL_PIN_LIFETIME)); + task.setStatusId(res.getInt(COL_STATUS)); + task.setVoName(res.getString(COL_VO_NAME)); + task.setUserID(res.getString(COL_USER_ID)); + task.setRetryAttempt(res.getInt(COL_RETRY_ATTEMPT)); + Calendar calendar = new GregorianCalendar(); + task.setDeferredRecallInstant(res.getTimestamp(COL_DEFERRED_STARTTIME, calendar)); + task.setInsertionInstant(res.getTimestamp(COL_DATE, calendar)); + try { + task.forceStatusUpdateInstants(res.getTimestamp(COL_IN_PROGRESS_DATE, calendar), + res.getTimestamp(COL_FINAL_STATUS_DATE, calendar)); + } catch (IllegalArgumentException e) { + log.error("Unable to set status update timestamps on the coned task"); + } + } catch (SQLException e) { + throw new DataAccessException("Unable to getting info from ResultSet. 
" + e); + } + } + + @Override + public boolean setGroupTaskStatus(UUID groupTaskId, int newStatusId, Date timestamp) + throws DataAccessException { + + Connection con = null; + PreparedStatement ps = null; + ResultSet res = null; + + boolean ret = false; + int oldStatusId = -1; + + try { + con = getConnection(); + + ps = sqlHelper.getQueryGetGroupTasks(con, groupTaskId); + + log.debug("QUERY: {}", ps); + + // retrieves the tasks of this task group + res = ps.executeQuery(); + + if (!res.first()) { + log.error("No tasks with GroupTaskId='{}'", groupTaskId); + throw new DataAccessException( + "No recall table row retrieved executing query: '" + ps + "'"); + } + + // verify if their stored status is equal for all + oldStatusId = res.getInt(COL_STATUS); + do { + int currentStatusId = res.getInt(COL_STATUS); + if (currentStatusId != oldStatusId) { + log.warn( + "The tasks with groupTaskId {} have different statuses: {} from task {} differs " + + "from expected {}", + groupTaskId, currentStatusId, res.getString(COL_TASK_ID), oldStatusId); + break; + } + oldStatusId = currentStatusId; + } while (res.next()); + + if (oldStatusId != newStatusId) { + // update the task status and if is a valid transition set the relative transition timestamp + if (!TapeRecallStatus.getRecallTaskStatus(oldStatusId).precedes(newStatusId)) { + log.warn( + "Requested the update of the status of a recall task group to status {} that is precedent " + + "to the recorded status performing the request the same...", + newStatusId, oldStatusId); + } + String timestampColumn = null; + if (TapeRecallStatus.isFinalStatus(newStatusId)) { + timestampColumn = COL_FINAL_STATUS_DATE; + } else { + if (TapeRecallStatus.IN_PROGRESS + .equals(TapeRecallStatus.getRecallTaskStatus(newStatusId))) { + timestampColumn = COL_IN_PROGRESS_DATE; + } else { + log.warn( + "unable to determine the status update timestamp column to use given the new statusId '{}'", + newStatusId); + } + } + if (timestampColumn != null) { + ps 
= sqlHelper.getQueryUpdateGroupTaskStatus(con, groupTaskId, newStatusId, + timestampColumn, timestamp); + } else { + ps = sqlHelper.getQuerySetGroupTaskStatus(con, groupTaskId, newStatusId); + } + if (ps.executeUpdate() > 0) { + ret = true; + } + } else { + log.warn( + "Skipping the status upadate operation, the status already stored is equal to the new one provided"); + } + } catch (IllegalArgumentException | SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return ret; + } } diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/VolatileAndJiTDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/VolatileAndJiTDAOMySql.java new file mode 100644 index 000000000..f32f3bed5 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/VolatileAndJiTDAOMySql.java @@ -0,0 +1,612 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.impl.mysql; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.VolatileAndJiTDAO; +import it.grid.storm.persistence.model.JiTData; +import it.grid.storm.persistence.pool.StormDbConnectionPool; + +/** + * DAO class for VolatileAndJiTCatalog: it has been specifically designed for MySQL. + * + * @author EGRID ICTP + * @version 1.0 (based on old PinnedFilesDAO) + * @date November, 2006 + */ +public class VolatileAndJiTDAOMySql extends AbstractDAO implements VolatileAndJiTDAO { + + private static final Logger log = LoggerFactory.getLogger(VolatileAndJiTDAOMySql.class); + + private static VolatileAndJiTDAO instance; + + public static synchronized VolatileAndJiTDAO getInstance() { + if (instance == null) { + instance = new VolatileAndJiTDAOMySql(); + } + return instance; + } + + private VolatileAndJiTDAOMySql() { + super(StormDbConnectionPool.getInstance()); + } + + /** + * Method that inserts a new entry in the JiT table of the DB, consisting of the specified + * filename, the local user uid, the local user gid, the acl, the start time as expressed by UNIX + * epoch (seconds since 00:00:00 1 1 1970) and the number of seconds the jit must last. + * + * In the DB, the start time gets translated into DATE:TIME in order to make it more readable. + * pinLifetime remains in seconds. 
+ */ + public void addJiT(String filename, int uid, int gid, int acl, long start, long pinLifetime) { + + String sql = + "INSERT INTO jit(file,uid,gid,acl,start,pinLifetime) VALUES(?,?,?,?,FROM_UNIXTIME(?),?)"; + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + stmt.setInt(2, uid); + stmt.setInt(3, gid); + stmt.setInt(4, acl); + stmt.setLong(5, start); + stmt.setLong(6, pinLifetime); + log.debug("VolatileAndJiTDAO. addJiT: {}", stmt); + stmt.execute(); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in addJiT: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that inserts a new entry in the Volatile table of the DB, consisting of the specified + * filename, the start time as expressed by UNIX epoch (seconds since 00:00:00 1 1 1970), and the + * number of seconds the file must be kept for. + * + * In the DB, the start time gets translated into DATE:TIME in order to make it more readable. + * pinLifetime remains in seconds. + */ + public void addVolatile(String filename, long start, long fileLifetime) { + + String sql = "INSERT INTO volatile(file,start,fileLifetime) VALUES(?,FROM_UNIXTIME(?),?)"; + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + stmt.setLong(2, start); + stmt.setLong(3, fileLifetime); + log.debug("VolatileAndJiTDAO. addVolatile: {}", stmt); + stmt.execute(); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in addVolatile: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Checks whether the given file exists in the volatile table or not. 
+ * + * @param filename + * @return true if there is antry for the given file in the volatilte table, + * false otherwise. + */ + public boolean exists(String filename) { + + String sql = "SELECT ID FROM volatile WHERE file=? LIMIT 1"; + + Connection con = null; + PreparedStatement stmt = null; + ResultSet rs = null; + boolean result; + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + log.debug("VolatileAndJiTDAO - existsOnVolatile - {}", stmt); + rs = stmt.executeQuery(); + + if (rs.next()) { + result = true; + } else { + result = false; + } + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in existsOnVolatile: {}", e.getMessage(), e); + e.printStackTrace(); + result = false; + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + return result; + } + + /** + * Method that updates an existing entry in the JiT table of the DB, consisting of the specified + * filename, the uid and gid of the local user, the acl, the start time as expressed by UNIX epoch + * (seconds since 00:00:00 1 1 1970), and the number of seconds the jit must last. + * + * In the DB, the start time gets translated into DATE:TIME in order to make it more readable. + * pinLifetime remains in seconds. + * + * This method _forces_ the update regardless of the fact that the new expiry lasts less than the + * current one! This method is intended to be used by expireJiT. + * + * Only start and pinLifetime get updated, while filename, uid, gid and acl, are used as criteria + * to select records. + */ + public void forceUpdateJiT(String filename, int uid, int acl, long start, long pinLifetime) { + + String sql = "UPDATE jit " + "SET start=FROM_UNIXTIME(?), pinLifetime=? " + + "WHERE file=? AND uid=? 
AND acl=?"; + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setLong(1, start); + stmt.setLong(2, pinLifetime); + stmt.setString(3, filename); + stmt.setInt(4, uid); + stmt.setInt(5, acl); + log.debug("VolatileAndJiTDAO. forceUpdateJiT: {}", stmt); + int n = stmt.executeUpdate(); + log.debug("VolatileAndJiTDAO. {} jit entries forced updated.", n); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in forceUpdateJiT: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that returns the number of entries in the catalogue, matching the given filename, uid + * and acl. + * + * Notice that in general there should be either one or none, and more should be taken as + * indication of catalogue corruption. + * + * -1 is returned if there are problems with the DB. + */ + public int numberJiT(String filename, int uid, int acl) { + + String sql = "SELECT COUNT(ID) FROM jit WHERE file=? AND uid=? AND acl=?"; + + Connection con = null; + PreparedStatement stmt = null; + ResultSet rs = null; + int n = -1; + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + stmt.setInt(2, uid); + stmt.setInt(3, acl); + log.debug("VolatileAndJiTDAO. numberJiT: {}", stmt); + rs = stmt.executeQuery(); + + if (rs.next()) { + n = rs.getInt(1); + } else { + log.error("VolatileAndJiTDAO! Unexpected situation in numberJiT: " + "result set empty!"); + } + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in numberJiT: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + return n; + } + + /** + * Method that returns the number of Volatile entries in the catalogue, for the given filename. 
+ * + * Notice that in general there should be either one or none, and more should be taken as + * indication of catalogue corruption. + * + * -1 is returned if there are problems with the DB. + */ + public int numberVolatile(String filename) { + + String sql = "SELECT COUNT(ID) FROM volatile WHERE file=?"; + + Connection con = null; + PreparedStatement stmt = null; + ResultSet rs = null; + int n = -1; + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + log.debug("VolatileAndJiTDAO. numberVolatile: {}", stmt); + rs = stmt.executeQuery(); + if (rs.next()) { + n = rs.getInt(1); + } else { + log.error( + "VolatileAndJiTDAO! Unexpected situation in numberVolatile: " + "result set empty!"); + } + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in numberVolatile: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + return n; + } + + /** + * Method that removes all entries in the JiT table of the DB, that match the specified filename. + * So this action takes place _regardless_ of the user that set up the ACL! + */ + public void removeAllJiTsOn(String filename) { + + String sql = "DELETE FROM jit WHERE file=?"; + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + log.debug("VolatileAndJiTDAO. removeJiT: {}", stmt); + int n = stmt.executeUpdate(); + log.debug("VolatileAndJiTDAO. removeJiT: {} entries removed", n); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in removeJiT: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method used to remove all expired entries, both of pinned files and of jit ACLs. 
Also, when + * removing volatile entries, any jit entry that refers to those expired volatiles will also be + * removed. + * + * The method requires a long representing the time measured as UNIX EPOCH upon which to base the + * purging: entries are evaluated expired when compared to this date. + * + * The method returns an array of two Collections; Collection[0] contains expired volatile entries + * String PFNs, while Collection[1] contains JiTDataTO objects. Collection[1] also contains those + * entries that may not have expired yet, but since the respective Volatile is being removed they + * too must be removed automatically. + * + * WARNING! If any error occurs it gets logged, and an array of two empty Collection is returned. + * This operation is treated as a Transaction by the DB, so a Roll Back should return everything + * to its original state! + */ + public List removeExpired(long time) { + + List output = Lists.newArrayList(Lists.newArrayList(), Lists.newArrayList()); + + String vol = "SELECT ID,file FROM volatile WHERE (UNIX_TIMESTAMP(start)+fileLifetime volat = Lists.newArrayList(); + Collection volatid = Lists.newArrayList(); + while (rs.next()) { + volatid.add(Long.valueOf(rs.getLong("ID"))); + volat.add(rs.getString("file")); + } + int nvolat = volatid.size(); + closeResultSet(rs); + closeStatement(stmt); + + // get list of jits + if (nvolat > 0) { + // there are expired volatile entries: adjust jit selection to include + // those SURLs too! + jit = jit + " OR file IN " + makeFileString(volat); + } + stmt = con.prepareStatement(jit); + stmt.setLong(1, time); + log.debug("VolatileAndJiTDAO. 
removeExpired: {}", stmt); + rs = stmt.executeQuery(); + + Collection track = Lists.newArrayList(); + Collection trackid = Lists.newArrayList(); + + while (rs.next()) { + trackid.add(Long.valueOf(rs.getLong("ID"))); + JiTData aux = + new JiTData(rs.getString("file"), rs.getInt("acl"), rs.getInt("uid"), rs.getInt("gid")); + track.add(aux); + } + int njit = trackid.size(); + closeResultSet(rs); + closeStatement(stmt); + + // remove entries + Collection volcol = Lists.newArrayList(); + Collection jitcol = Lists.newArrayList(); + try { + con.setAutoCommit(false); // begin transaction! + // delete volatile + int deletedvol = 0; + if (nvolat > 0) { + delvol = delvol + makeIDString(volatid); + stmt = con.prepareStatement(delvol); + log.debug("VolatileAndJiTDAO. removeExpired: {}", stmt); + deletedvol = stmt.executeUpdate(); + closeStatement(stmt); + } + // delete jits + int deletedjit = 0; + if (njit > 0) { + deljit = deljit + makeIDString(trackid); + stmt = con.prepareStatement(deljit); + log.debug("VolatileAndJiTDAO. removeExpired: {}", stmt); + deletedjit = stmt.executeUpdate(); + closeStatement(stmt); + } + con.commit(); + con.setAutoCommit(true); // end transaction! + log.debug("VolatileAndJiTDAO. Removed {} volatile catalogue entries " + + "and {} jit catalogue entries.", deletedvol, deletedjit); + volcol = volat; + jitcol = track; + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Unable to complete removeExpired... " + "rolling back! {}", + e.getMessage(), e); + con.rollback(); + closeStatement(stmt); + } + + // return collections + return Lists.newArrayList(volcol, jitcol); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Unable to complete removeExpired! 
{}", e.getMessage(), e); + // in case of any failure return an array of two empty Collection + return output; + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that updates an existing entry in the JiT table of the DB, consisting of the specified + * filename, the uid and gid of the local user, the acl, the start time as expressed by UNIX epoch + * (seconds since 00:00:00 1 1 1970), and the number of seconds the jit must last. + * + * In the DB, the start time gets translated into DATE:TIME in order to make it more readable. + * pinLifetime remains in seconds. + * + * Entries get updated only if the new expiry calculated by adding start and pinLifetime, is + * larger than the existing one. + * + * Only start and pinLifetime get updated, while filename, uid, gid and acl, are used as criteria + * to select records. + */ + public void updateJiT(String filename, int uid, int acl, long start, long pinLifetime) { + + String sql = "UPDATE jit SET start=FROM_UNIXTIME(?), pinLifetime=? " + + "WHERE file=? AND uid=? AND acl=? AND (UNIX_TIMESTAMP(start)+pinLifetime volatileInfoOn(String filename) { + + String sql = "SELECT UNIX_TIMESTAMP(start), fileLifetime FROM volatile WHERE file=?"; + + Connection con = null; + PreparedStatement stmt = null; + ResultSet rs = null; + List aux = Lists.newArrayList(); + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + log.debug("VolatileAndJiTDAO - infoOnVolatile - {}", stmt); + rs = stmt.executeQuery(); + if (rs.next()) { + aux.add(rs.getLong("UNIX_TIMESTAMP(start)")); + aux.add(rs.getLong("fileLifetime")); + } else { + log.debug("VolatileAndJiTDAO! infoOnVolatile did not find {}", filename); + } + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! 
Error in infoOnVolatile: {}", e.getMessage(), e); + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + return aux; + } + + /** + * Method that returns a String containing all Files. + */ + private String makeFileString(Collection files) { + + StringBuilder sb = new StringBuilder("("); + for (Iterator i = files.iterator(); i.hasNext();) { + sb.append("'"); + sb.append(i.next()); + sb.append("'"); + if (i.hasNext()) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all IDs. + */ + private String makeIDString(Collection rowids) { + + StringBuilder sb = new StringBuilder("("); + for (Iterator i = rowids.iterator(); i.hasNext();) { + sb.append(String.valueOf(i.next())); + if (i.hasNext()) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/AnonymousFileTransferData.java b/src/main/java/it/grid/storm/persistence/model/AnonymousFileTransferData.java new file mode 100644 index 000000000..ec610312c --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/AnonymousFileTransferData.java @@ -0,0 +1,85 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TTURL; + +/** + * @author Michele Dibenedetto + * + */ + +public abstract class AnonymousFileTransferData extends SurlMultyOperationRequestData + implements FileTransferData { + + protected TURLPrefix transferProtocols; + protected TTURL transferURL; + + public AnonymousFileTransferData(TSURL toSURL, TURLPrefix transferProtocols, TReturnStatus status, + TTURL transferURL) + throws InvalidFileTransferDataAttributesException, InvalidSurlRequestDataAttributesException { + + super(toSURL, status); + if (transferProtocols == null || transferURL == null) { + throw new InvalidFileTransferDataAttributesException(toSURL, transferProtocols, status, + transferURL); + } + this.transferProtocols = transferProtocols; + this.transferURL = transferURL; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.FileTransferData#getTransferProtocols() + */ + @Override + public final TURLPrefix getTransferProtocols() { + + return transferProtocols; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.FileTransferData#getTransferURL() + */ + @Override + public final TTURL getTransferURL() { + + return transferURL; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.FileTransferData#setTransferURL(it.grid.storm.srm .types.TTURL) + */ + @Override + public final void setTransferURL(final TTURL turl) { + + if (turl != null) { + transferURL = turl; + } + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/AnonymousPtGData.java b/src/main/java/it/grid/storm/persistence/model/AnonymousPtGData.java new file mode 100644 index 000000000..103583402 --- /dev/null +++ 
b/src/main/java/it/grid/storm/persistence/model/AnonymousPtGData.java @@ -0,0 +1,229 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TStatusCode; +import it.grid.storm.srm.types.TTURL; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class represents a PrepareToGetChunkData, that is part of a multifile PrepareToGet srm + * request. It contains data about: the requestToken, the fromSURL, the requested lifeTime of + * pinning, the TDirOption which tells whether the requested SURL is a directory and if it must be + * recursed at all levels, as well as the desired number of levels to recurse, the desired + * transferProtocols in order of preference, the fileSize, and the transferURL for the supplied + * SURL. 
+ * + * @author EGRID - ICTP Trieste + * @date March 21st, 2005 + * @version 3.0 + */ +public class AnonymousPtGData extends AnonymousFileTransferData implements PtGData { + + private static final Logger log = LoggerFactory.getLogger(AnonymousPtGData.class); + + /** requested lifetime of TURL: it is the pin time! */ + protected TLifeTimeInSeconds pinLifeTime; + /** specifies if the request regards a directory and related info */ + protected TDirOption dirOption; + /** size of file */ + protected TSizeInBytes fileSize; + + /** + * @param requestToken + * @param fromSURL + * @param lifeTime + * @param dirOption + * @param desiredProtocols + * @param fileSize + * @param status + * @param transferURL + * @throws InvalidPtGDataAttributesException + */ + public AnonymousPtGData(TSURL SURL, TLifeTimeInSeconds lifeTime, TDirOption dirOption, + TURLPrefix desiredProtocols, TSizeInBytes fileSize, TReturnStatus status, TTURL transferURL) + throws InvalidPtGDataAttributesException, InvalidFileTransferDataAttributesException, + InvalidSurlRequestDataAttributesException { + + super(SURL, desiredProtocols, status, transferURL); + if (lifeTime == null || dirOption == null || fileSize == null) { + log.debug("Invalid arguments: lifeTime={}, dirOption={}, fileSize={}", lifeTime, dirOption, + fileSize); + throw new InvalidPtGDataAttributesException(SURL, lifeTime, dirOption, desiredProtocols, + fileSize, status, transferURL); + + } + this.pinLifeTime = lifeTime; + this.dirOption = dirOption; + this.fileSize = fileSize; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtGData#getPinLifeTime() + */ + @Override + public TLifeTimeInSeconds getPinLifeTime() { + + return pinLifeTime; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtGData#getDirOption() + */ + @Override + public TDirOption getDirOption() { + + return dirOption; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtGData#getFileSize() + */ + @Override + public TSizeInBytes 
getFileSize() { + + return fileSize; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtGData#setFileSize(it.grid.storm.srm.types.TSizeInBytes ) + */ + @Override + public void setFileSize(TSizeInBytes size) { + + if (size != null) { + fileSize = size; + } + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtGData#changeStatusSRM_FILE_PINNED(java.lang.String ) + */ + @Override + public void changeStatusSRM_FILE_PINNED(String explanation) { + + setStatus(TStatusCode.SRM_FILE_PINNED, explanation); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("PtGChunkData [pinLifeTime="); + builder.append(pinLifeTime); + builder.append(", dirOption="); + builder.append(dirOption); + builder.append(", fileSize="); + builder.append(fileSize); + builder.append(", transferProtocols="); + builder.append(transferProtocols); + builder.append(", SURL="); + builder.append(SURL); + builder.append(", status="); + builder.append(status); + builder.append(", transferURL="); + builder.append(transferURL); + builder.append("]"); + return builder.toString(); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((dirOption == null) ? 0 : dirOption.hashCode()); + result = prime * result + ((fileSize == null) ? 0 : fileSize.hashCode()); + result = prime * result + ((pinLifeTime == null) ? 
0 : pinLifeTime.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + AnonymousPtGData other = (AnonymousPtGData) obj; + if (dirOption == null) { + if (other.dirOption != null) { + return false; + } + } else if (!dirOption.equals(other.dirOption)) { + return false; + } + if (fileSize == null) { + if (other.fileSize != null) { + return false; + } + } else if (!fileSize.equals(other.fileSize)) { + return false; + } + if (pinLifeTime == null) { + if (other.pinLifeTime != null) { + return false; + } + } else if (!pinLifeTime.equals(other.pinLifeTime)) { + return false; + } + return true; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/AnonymousPtPData.java b/src/main/java/it/grid/storm/persistence/model/AnonymousPtPData.java new file mode 100644 index 000000000..b2451461f --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/AnonymousPtPData.java @@ -0,0 +1,238 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TSpaceToken; +import it.grid.storm.srm.types.TStatusCode; +import it.grid.storm.srm.types.TTURL; + +/** + * @author Michele Dibenedetto + * + */ +public class AnonymousPtPData extends AnonymousFileTransferData implements PtPData { + + private static final Logger log = LoggerFactory.getLogger(AnonymousPtPData.class); + + protected TSpaceToken spaceToken; + protected TLifeTimeInSeconds pinLifetime; + protected TLifeTimeInSeconds fileLifetime; + protected TFileStorageType fileStorageType; + protected TOverwriteMode overwriteOption; + protected TSizeInBytes expectedFileSize; + + public AnonymousPtPData(TSURL toSURL, TLifeTimeInSeconds pinLifetime, + TLifeTimeInSeconds fileLifetime, TFileStorageType fileStorageType, TSpaceToken spaceToken, + TSizeInBytes expectedFileSize, TURLPrefix transferProtocols, TOverwriteMode overwriteOption, + TReturnStatus status, TTURL transferURL) throws InvalidPtPDataAttributesException, + InvalidFileTransferDataAttributesException, InvalidSurlRequestDataAttributesException { + + super(toSURL, transferProtocols, status, transferURL); + if (pinLifetime == null || fileLifetime == null || spaceToken == null || fileStorageType == null + || expectedFileSize == null || overwriteOption == null) { + log.debug( + "Invalid arguments: 
pinLifetime={}, fileLifetime={}, " + + "spaceToken={}, fileStorageType={}, expectedFileSize={}, " + "overwriteOption={}", + pinLifetime, fileLifetime, spaceToken, fileStorageType, expectedFileSize, + overwriteOption); + throw new InvalidPtPDataAttributesException(toSURL, pinLifetime, fileLifetime, + fileStorageType, spaceToken, expectedFileSize, transferProtocols, overwriteOption, status, + transferURL); + } + this.spaceToken = spaceToken; + this.pinLifetime = pinLifetime; + this.fileLifetime = fileLifetime; + this.fileStorageType = fileStorageType; + this.expectedFileSize = expectedFileSize; + this.overwriteOption = overwriteOption; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtPData#getSpaceToken() + */ + @Override + public final TSpaceToken getSpaceToken() { + + return spaceToken; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtPData#pinLifetime() + */ + @Override + public TLifeTimeInSeconds pinLifetime() { + + return pinLifetime; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtPData#fileLifetime() + */ + @Override + public TLifeTimeInSeconds fileLifetime() { + + return fileLifetime; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtPData#fileStorageType() + */ + @Override + public TFileStorageType fileStorageType() { + + return fileStorageType; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtPData#expectedFileSize() + */ + @Override + public TSizeInBytes expectedFileSize() { + + return expectedFileSize; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtPData#overwriteOption() + */ + @Override + public TOverwriteMode overwriteOption() { + + return overwriteOption; + } + + /** + * Method that sets the status of this request to SRM_SPACE_AVAILABLE; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. 
+ */ + @Override + public void changeStatusSRM_SPACE_AVAILABLE(String explanation) { + + setStatus(TStatusCode.SRM_SPACE_AVAILABLE, explanation); + } + + /** + * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. + */ + @Override + public void changeStatusSRM_DUPLICATION_ERROR(String explanation) { + + setStatus(TStatusCode.SRM_DUPLICATION_ERROR, explanation); + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("PtPChunkData\n"); + sb.append("toSURL="); + sb.append(SURL); + sb.append("; "); + sb.append("pinLifetime="); + sb.append(pinLifetime); + sb.append("; "); + sb.append("fileLifetime="); + sb.append(fileLifetime); + sb.append("; "); + sb.append("fileStorageType="); + sb.append(fileStorageType); + sb.append("; "); + sb.append("spaceToken="); + sb.append(spaceToken); + sb.append("; "); + sb.append("expectedFileSize="); + sb.append(expectedFileSize); + sb.append("; "); + sb.append("transferProtocols="); + sb.append(transferProtocols); + sb.append("; "); + sb.append("overwriteOption="); + sb.append(overwriteOption); + sb.append("; "); + sb.append("status="); + sb.append(status); + sb.append("; "); + sb.append("transferURL="); + sb.append(transferURL); + sb.append("; "); + return sb.toString(); + } + + @Override + public int hashCode() { + + int hash = 17; + hash = 37 * hash + SURL.hashCode(); + hash = 37 * hash + pinLifetime.hashCode(); + hash = 37 * hash + fileLifetime.hashCode(); + hash = 37 * hash + fileStorageType.hashCode(); + hash = 37 * hash + spaceToken.hashCode(); + hash = 37 * hash + expectedFileSize.hashCode(); + hash = 37 * hash + transferProtocols.hashCode(); + hash = 37 * hash + overwriteOption.hashCode(); + hash = 37 * hash + status.hashCode(); + hash = 37 * hash + transferURL.hashCode(); + return hash; + } + + @Override + 
public boolean equals(Object o) { + + if (o == this) { + return true; + } + if (!(o instanceof AnonymousPtPData)) { + return false; + } + AnonymousPtPData cd = (AnonymousPtPData) o; + return SURL.equals(cd.SURL) && pinLifetime.equals(cd.pinLifetime) + && fileLifetime.equals(cd.fileLifetime) && fileStorageType.equals(cd.fileStorageType) + && spaceToken.equals(cd.spaceToken) && expectedFileSize.equals(cd.expectedFileSize) + && transferProtocols.equals(cd.transferProtocols) + && overwriteOption.equals(cd.overwriteOption) && status.equals(cd.status) + && transferURL.equals(cd.transferURL); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/BoLChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/BoLChunkDataTO.java new file mode 100644 index 000000000..40ca5644d --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/BoLChunkDataTO.java @@ -0,0 +1,268 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.namespace.model.Protocol; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.srm.types.TStatusCode; + +import java.sql.Timestamp; +import java.util.List; + +/** + * Class that represents a row in the Persistence Layer: this is all raw data referring to the + * BoLChunkData proper, that is, String and primitive types. + * + * Each field is initialized with default values as per SRM 2.2 specification: protocolList GSIFTP + * dirOption false status SRM_REQUEST_QUEUED + * + * All other fields are 0 if int, or a white space if String. + * + * @author CNAF + * @version 1.0 + * @date Aug 2009 + */ +public class BoLChunkDataTO { + + /* Database table request_Bol fields BEGIN */ + private long primaryKey = -1; // ID primary key of record in DB + private String fromSURL = " "; + private boolean dirOption; // initialised in constructor + private String normalizedStFN = null; + private Integer surlUniqueID = null; + /* Database table request_Get fields END */ + + private String requestToken = " "; + private int lifetime = 0; + private boolean allLevelRecursive; // initialised in constructor + private int numLevel; // initialised in constructor + private List protocolList = null; // initialised in constructor + private long filesize = 0; + private int status; // initialised in constructor + private String errString = " "; + private int deferredStartTime = -1; + private Timestamp timeStamp = null; + + public BoLChunkDataTO() { + + TURLPrefix protocolPreferences = new TURLPrefix(); + protocolPreferences.addProtocol(Protocol.GSIFTP); + this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); + this.status = StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED); + this.dirOption = false; + this.allLevelRecursive = 
false; + this.numLevel = 0; + } + + public boolean getAllLevelRecursive() { + + return allLevelRecursive; + } + + public int getDeferredStartTime() { + + return deferredStartTime; + } + + public boolean getDirOption() { + + return dirOption; + } + + public String getErrString() { + + return errString; + } + + public long getFileSize() { + + return filesize; + } + + public String getFromSURL() { + + return fromSURL; + } + + public int getLifeTime() { + + return lifetime; + } + + public int getNumLevel() { + + return numLevel; + } + + public long getPrimaryKey() { + + return primaryKey; + } + + public List getProtocolList() { + + return protocolList; + } + + public String getRequestToken() { + + return requestToken; + } + + public Timestamp getTimeStamp() { + + return timeStamp; + } + + public int getStatus() { + + return status; + } + + public void setAllLevelRecursive(boolean b) { + + allLevelRecursive = b; + } + + public void setDeferredStartTime(int deferredStartTime) { + + this.deferredStartTime = deferredStartTime; + } + + public void setDirOption(boolean b) { + + dirOption = b; + } + + public void setErrString(String s) { + + errString = s; + } + + public void setFileSize(long n) { + + filesize = n; + } + + public void setFromSURL(String s) { + + fromSURL = s; + } + + public void setLifeTime(int n) { + + lifetime = n; + } + + public void setNumLevel(int n) { + + numLevel = n; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public void setProtocolList(List l) { + + if ((l != null) && (!l.isEmpty())) { + protocolList = l; + } + } + + public void setRequestToken(String s) { + + requestToken = s; + } + + public void setTimeStamp(Timestamp timeStamp) { + + this.timeStamp = timeStamp; + } + + public void setStatus(int n) { + + status = n; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the 
normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * @param surlUniqueID the sURLUniqueID to set + */ + public void setSurlUniqueID(Integer surlUniqueID) { + + this.surlUniqueID = surlUniqueID; + } + + /** + * @return the sURLUniqueID + */ + public Integer sulrUniqueID() { + + return surlUniqueID; + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(requestToken); + sb.append(" "); + sb.append(fromSURL); + sb.append(" "); + sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(lifetime); + sb.append(" "); + sb.append(dirOption); + sb.append(" "); + sb.append(allLevelRecursive); + sb.append(" "); + sb.append(numLevel); + sb.append(" "); + sb.append(protocolList); + sb.append(" "); + sb.append(filesize); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/BoLData.java b/src/main/java/it/grid/storm/persistence/model/BoLData.java new file mode 100644 index 000000000..1ca1fe124 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/BoLData.java @@ -0,0 +1,145 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.common.types.TimeUnit; +import it.grid.storm.persistence.exceptions.InvalidBoLDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TTURL; + +/** + * This class represents a BringOnLineChunkData, that is part of a multifile BringOnLine srm + * request. It contains data about: the requestToken, the fromSURL, the requested lifeTime of + * pinning, the TDirOption which tells whether the requested SURL is a directory and if it must be + * recursed at all levels, as well as the desired number of levels to recurse, the desired + * transferProtocols in order of preference, the fileSize, and the transferURL for the supplied + * SURL. + * + * @author CNAF + * @version 1.0 + * @date Aug 2009 + */ +public class BoLData extends AnonymousFileTransferData { + + private static final Logger log = LoggerFactory.getLogger(BoLData.class); + + /** + * requested lifetime of TURL: it is the pin time! 
+ */ + private TLifeTimeInSeconds lifeTime; + + /** + * specifies if the request regards a directory and related info + */ + private TDirOption dirOption; + + /** + * size of file + */ + private TSizeInBytes fileSize; + + /** + * how many seconds to wait before to make the lifeTime start consuming + */ + private int deferredStartTime = 0; + + public BoLData(TSURL fromSURL, TLifeTimeInSeconds lifeTime, TDirOption dirOption, + TURLPrefix desiredProtocols, TSizeInBytes fileSize, TReturnStatus status, TTURL transferURL, + int deferredStartTime) throws InvalidFileTransferDataAttributesException, + InvalidBoLDataAttributesException, InvalidSurlRequestDataAttributesException { + + super(fromSURL, desiredProtocols, status, transferURL); + if (lifeTime == null || dirOption == null || fileSize == null) { + throw new InvalidBoLDataAttributesException(fromSURL, lifeTime, dirOption, desiredProtocols, + fileSize, status, transferURL); + } + this.lifeTime = lifeTime; + this.dirOption = dirOption; + this.fileSize = fileSize; + this.deferredStartTime = deferredStartTime; + } + + public int getDeferredStartTime() { + + return deferredStartTime; + } + + /** + * Method that returns the dirOption specified in the srm request. + */ + public TDirOption getDirOption() { + + return dirOption; + } + + /** + * Method that returns the file size for this chunk of the srm request. + */ + public TSizeInBytes getFileSize() { + + return fileSize; + } + + /** + * Method that returns the requested pin life time for this chunk of the srm request. + */ + public TLifeTimeInSeconds getLifeTime() { + + return lifeTime; + } + + public void setDeferredStartTime(int deferredStartTime) { + + this.deferredStartTime = deferredStartTime; + } + + /** + * Method used to set the size of the file corresponding to the requested SURL. If the supplied + * TSizeInByte is null, then nothing gets set! 
+ */ + public void setFileSize(TSizeInBytes size) { + + if (size != null) { + fileSize = size; + } + } + + public void setLifeTime(long lifeTimeInSeconds) { + + TLifeTimeInSeconds lifeTime; + try { + lifeTime = TLifeTimeInSeconds.make(lifeTimeInSeconds, TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + log.error(e.getMessage(), e); + return; + } + + this.lifeTime = lifeTime; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/BoLPersistentChunkData.java b/src/main/java/it/grid/storm/persistence/model/BoLPersistentChunkData.java new file mode 100644 index 000000000..1aaf7f6df --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/BoLPersistentChunkData.java @@ -0,0 +1,109 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidBoLDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidBoLPersistentChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TTURL; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class represents a BringOnLineChunkData, that is part of a multifile BringOnLine srm + * request. It contains data about: the requestToken, the fromSURL, the requested lifeTime of + * pinning, the TDirOption which tells whether the requested SURL is a directory and if it must be + * recursed at all levels, as well as the desired number of levels to recurse, the desired + * transferProtocols in order of preference, the fileSize, and the transferURL for the supplied + * SURL. 
+ * + * @author CNAF + * @version 1.0 + * @date Aug 2009 + */ +public class BoLPersistentChunkData extends BoLData implements PersistentChunkData { + + private static final Logger log = LoggerFactory.getLogger(BoLPersistentChunkData.class); + + /** + * long representing the primary key for the persistence layer, in the status_Put table + */ + private long primaryKey = -1; + + /** + * This is the requestToken of the multifile srm request to which this chunk belongs + */ + private final TRequestToken requestToken; + + public BoLPersistentChunkData(TRequestToken requestToken, TSURL fromSURL, + TLifeTimeInSeconds lifeTime, TDirOption dirOption, TURLPrefix desiredProtocols, + TSizeInBytes fileSize, TReturnStatus status, TTURL transferURL, int deferredStartTime) + throws InvalidBoLPersistentChunkDataAttributesException, + InvalidFileTransferDataAttributesException, InvalidBoLDataAttributesException, + InvalidSurlRequestDataAttributesException { + + super(fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, transferURL, + deferredStartTime); + if (requestToken == null) { + log.debug("BoLPersistentChunkData: requestToken is null!"); + throw new InvalidBoLPersistentChunkDataAttributesException(requestToken, fromSURL, lifeTime, + dirOption, desiredProtocols, fileSize, status, transferURL); + } + this.requestToken = requestToken; + } + + /** + * Method that returns the requestToken of the srm request to which this chunk belongs. + */ + public TRequestToken getRequestToken() { + + return requestToken; + } + + /** + * Method used to get the primary key used in the persistence layer! + */ + @Override + public long getPrimaryKey() { + + return primaryKey; + } + + /** + * Method used to set the primary key to be used in the persistence layer! 
+ */ + public void setPrimaryKey(long l) { + + primaryKey = l; + } + + @Override + public long getIdentifier() { + + return getPrimaryKey(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/ChunkData.java b/src/main/java/it/grid/storm/persistence/model/ChunkData.java new file mode 100644 index 000000000..fc684290e --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ChunkData.java @@ -0,0 +1,10 @@ +package it.grid.storm.persistence.model; + +public interface ChunkData extends RequestData { + + /** + * Method that returns the primary key in persistence, associated with This Chunk. + */ + public long getIdentifier(); + +} diff --git a/src/main/java/it/grid/storm/persistence/model/FileTransferData.java b/src/main/java/it/grid/storm/persistence/model/FileTransferData.java new file mode 100644 index 000000000..c343f923c --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/FileTransferData.java @@ -0,0 +1,25 @@ +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TTURL; + +public interface FileTransferData extends SynchMultyOperationRequestData { + + /** + * Method that returns a TURLPrefix containing the transfer protocols desired for this chunk of + * the srm request. + */ + public TURLPrefix getTransferProtocols(); + + /** + * Method that returns the TURL for this chunk of the srm request. + */ + public TTURL getTransferURL(); + + /** + * Method used to set the transferURL associated to the SURL of this chunk. If TTURL is null, then + * nothing gets set! + */ + public void setTransferURL(final TTURL turl); + +} diff --git a/src/main/java/it/grid/storm/persistence/model/GUID.java b/src/main/java/it/grid/storm/persistence/model/GUID.java deleted file mode 100644 index e316630d5..000000000 --- a/src/main/java/it/grid/storm/persistence/model/GUID.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.model; - -import java.io.Serializable; - -import java.net.InetAddress; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -//FIXME: Why isn't storm using the standard UUID class? - - -/** - * GUID Value Object. - *

- * Used to retain/generate a GUID/UUID. - *

- */ - -public class GUID implements Serializable { - - private static final long serialVersionUID = 7241176020077117264L; - - private static final Logger log = LoggerFactory.getLogger(GUID.class); - - private byte guidValue[] = new byte[16]; - - public GUID() { - buildNewGUID(); - } - - public GUID(String guidString) { - - int pos = 0; - int count = 0; - - while (pos < guidString.length()) { - guidValue[count] = getByteValue(guidString.substring(pos, pos + 2)); - pos += 2; - count++; - - if (pos == guidString.length()) { - continue; - } - - if (guidString.charAt(pos) == '-') { - pos++; - } - } - } - - - private byte getByteValue(String hex) { - - return (byte) Integer.parseInt(hex, 16); - } - - private String getHexString(byte val) { - - String hexString; - if (val < 0) { - hexString = Integer.toHexString(val + 256); - } else { - hexString = Integer.toHexString(val); - } - - if (hexString.length() < 2) { - return "0" + hexString.toUpperCase(); - } - return hexString.toUpperCase(); - } - - private void setByteValues(byte[] lg, int startPos, int count) { - - for (int i = 0; i < count; i++) { - guidValue[i + startPos] = lg[i]; - } - } - - private void setByteValues(long lg, int startPos, int count) { - - for (int i = 0; i < count; i++) { - guidValue[i + startPos] = (byte) (lg & 0xFF); - lg = lg / 0xFF; - } - } - - private void buildNewGUID() { - - try { - // The time in milli seconds for six bytes - // gives us until the year 10000ish. - long lg = System.currentTimeMillis(); - setByteValues(lg, 0, 6); - - // The hash code for this object for two bytes (As a why not option?) 
- lg = this.hashCode(); - setByteValues(lg, 6, 2); - - // The ip address for this computer (as we cannot get to the MAC address) - InetAddress inet = InetAddress.getLocalHost(); - byte[] bytes = inet.getAddress(); - setByteValues(bytes, 8, 4); - - // A random number for two bytes - lg = (long) ((Math.random() * 0xFFFF)); - setByteValues(lg, 12, 2); - - // Another random number for two bytes - lg = (long) ((Math.random() * 0xFFFF)); - setByteValues(lg, 14, 2); - - } catch (Exception e) { - log.error("GUID generation error : {}", e.getMessage(), e); - } - } - - public byte[] getBytes() { - - return guidValue; - } - - /** - * Overrides toString(). Returns the array of bytes in the standard form: - * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - * - * @return the string format - */ - @Override - public String toString() { - - StringBuilder buf = new StringBuilder(); - - buf.append(getHexString(guidValue[0])); - buf.append(getHexString(guidValue[1])); - buf.append(getHexString(guidValue[2])); - buf.append(getHexString(guidValue[3])); - buf.append('-'); - buf.append(getHexString(guidValue[4])); - buf.append(getHexString(guidValue[5])); - buf.append('-'); - buf.append(getHexString(guidValue[6])); - buf.append(getHexString(guidValue[7])); - buf.append('-'); - buf.append(getHexString(guidValue[8])); - buf.append(getHexString(guidValue[9])); - buf.append('-'); - buf.append(getHexString(guidValue[10])); - buf.append(getHexString(guidValue[11])); - buf.append(getHexString(guidValue[12])); - buf.append(getHexString(guidValue[13])); - buf.append(getHexString(guidValue[14])); - buf.append(getHexString(guidValue[15])); - - return buf.toString(); - } -} diff --git a/src/main/java/it/grid/storm/persistence/model/IdentityPtGData.java b/src/main/java/it/grid/storm/persistence/model/IdentityPtGData.java new file mode 100644 index 000000000..e0442edb6 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/IdentityPtGData.java @@ -0,0 +1,73 @@ +/* + * + * Copyright (c) Istituto 
Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TTURL; +import it.grid.storm.synchcall.data.IdentityInputData; + +public class IdentityPtGData extends AnonymousPtGData implements IdentityInputData { + + private final GridUserInterface auth; + + /** + * @param requestToken + * @param fromSURL + * @param lifeTime + * @param dirOption + * @param desiredProtocols + * @param fileSize + * @param status + * @param transferURL + * @throws InvalidPtGDataAttributesException + */ + public IdentityPtGData(GridUserInterface auth, TSURL SURL, TLifeTimeInSeconds lifeTime, + TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize, + TReturnStatus status, TTURL transferURL) + throws InvalidPtGDataAttributesException, 
InvalidFileTransferDataAttributesException, + InvalidSurlRequestDataAttributesException, IllegalArgumentException { + + super(SURL, lifeTime, dirOption, desiredProtocols, fileSize, status, transferURL); + if (auth == null) { + throw new IllegalArgumentException( + "Unable to create the object, invalid arguments: auth=" + auth); + } + this.auth = auth; + } + + @Override + public GridUserInterface getUser() { + + return auth; + } + + @Override + public String getPrincipal() { + + return this.auth.getDn(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/IdentityPtPData.java b/src/main/java/it/grid/storm/persistence/model/IdentityPtPData.java new file mode 100644 index 000000000..81a4906e4 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/IdentityPtPData.java @@ -0,0 +1,69 @@ +/** + * + */ +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TSpaceToken; +import it.grid.storm.srm.types.TTURL; +import it.grid.storm.synchcall.data.IdentityInputData; + +/** + * @author Michele Dibenedetto + * + */ +public class IdentityPtPData extends AnonymousPtPData implements IdentityInputData { + + private final GridUserInterface auth; + + /** + * @param requestToken + * @param fromSURL + * @param lifeTime + * @param dirOption 
+ * @param desiredProtocols + * @param fileSize + * @param status + * @param transferURL + * @throws InvalidPtGDataAttributesException + */ + public IdentityPtPData(GridUserInterface auth, TSURL SURL, TLifeTimeInSeconds pinLifetime, + TLifeTimeInSeconds fileLifetime, TFileStorageType fileStorageType, TSpaceToken spaceToken, + TSizeInBytes expectedFileSize, TURLPrefix transferProtocols, TOverwriteMode overwriteOption, + TReturnStatus status, TTURL transferURL) + throws InvalidPtPDataAttributesException, InvalidFileTransferDataAttributesException, + InvalidSurlRequestDataAttributesException, IllegalArgumentException { + + super(SURL, pinLifetime, fileLifetime, fileStorageType, spaceToken, expectedFileSize, + transferProtocols, overwriteOption, status, transferURL); + if (auth == null) { + throw new IllegalArgumentException( + "Unable to create the object, invalid arguments: auth=" + auth); + } + this.auth = auth; + } + + @Override + public GridUserInterface getUser() { + + return auth; + } + + @Override + public String getPrincipal() { + + return this.auth.getDn(); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/InvalidPtGChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/model/InvalidPtGChunkDataAttributesException.java deleted file mode 100644 index db429957e..000000000 --- a/src/main/java/it/grid/storm/persistence/model/InvalidPtGChunkDataAttributesException.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.model; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStorageSystemInfo; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of PtGChunkData are invalid, that is if any of the following is - * _null_: requestToken, fromSURL, storageSystemInfo, lifeTime, fileStorageType, - * spaceToken, numOfLevels, TURLPrefix transferProtocols, fileSize, status, - * estimatedWaitTimeOnQueue, estimatedProcessingTime, transferURL, - * remainingPinTime. 
- * - * @author EGRID - ICTP Trieste - * @date March 23rd, 2005 - * @version 2.0 - */ -public class InvalidPtGChunkDataAttributesException extends Exception { - - // booleans that indicate whether the corresponding variable is null - private boolean nullRequestToken; - private boolean nullFromSURL; - private boolean nullStorageSystemInfo; - private boolean nullLifeTime; - private boolean nullFileStorageType; - private boolean nullSpaceToken; - private boolean nullDirOption; - private boolean nullTransferProtocols; - private boolean nullFileSize; - private boolean nullStatus; - private boolean nullEstimatedWaitTimeOnQueue; - private boolean nullEstimatedProcessingTime; - private boolean nullTransferURL; - private boolean nullRemainingPinTime; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. - */ - public InvalidPtGChunkDataAttributesException(TRequestToken requestToken, - TSURL fromSURL, TStorageSystemInfo storageSystemInfo, - TLifeTimeInSeconds lifeTime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TDirOption dirOption, TURLPrefix transferProtocols, - TSizeInBytes fileSize, TReturnStatus status, - TLifeTimeInSeconds estimatedWaitTimeOnQueue, - TLifeTimeInSeconds estimatedProcessingTime, TTURL transferURL, - TLifeTimeInSeconds remainingPinTime) { - - nullRequestToken = requestToken == null; - nullFromSURL = fromSURL == null; - nullStorageSystemInfo = storageSystemInfo == null; - nullLifeTime = lifeTime == null; - nullFileStorageType = fileStorageType == null; - nullSpaceToken = spaceToken == null; - nullDirOption = dirOption == null; - nullTransferProtocols = transferProtocols == null; - nullFileSize = fileSize == null; - nullStatus = status == null; - nullEstimatedWaitTimeOnQueue = estimatedWaitTimeOnQueue == null; - nullEstimatedProcessingTime = estimatedProcessingTime == null; - nullTransferURL = transferURL == null; - nullRemainingPinTime = remainingPinTime == null; - } - - public String toString() { 
- - StringBuilder sb = new StringBuilder(); - sb.append("Invalid PtGChunkData attributes: null-requestToken="); - sb.append(nullRequestToken); - sb.append("; nul-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-storageSystemInfo="); - sb.append(nullStorageSystemInfo); - sb.append("; null-lifeTime="); - sb.append(nullLifeTime); - sb.append("; null-filestorageType="); - sb.append(nullFileStorageType); - sb.append("; null-spaceToken="); - sb.append(nullSpaceToken); - sb.append("; null-dirOption="); - sb.append(nullDirOption); - sb.append("; null-transferProtocols="); - sb.append(nullTransferProtocols); - sb.append("; null-fileSize="); - sb.append(nullFileSize); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("; null-estimatedWaitTimeOnQueue="); - sb.append(nullEstimatedWaitTimeOnQueue); - sb.append("; null-estimatedProcessingTime="); - sb.append(nullEstimatedProcessingTime); - sb.append("; null-transferURL="); - sb.append(nullTransferURL); - sb.append("; null-remainingPinTime="); - sb.append(nullRemainingPinTime); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/persistence/model/InvalidRequestSummaryDataAttributesException.java b/src/main/java/it/grid/storm/persistence/model/InvalidRequestSummaryDataAttributesException.java deleted file mode 100644 index b2c54f9f2..000000000 --- a/src/main/java/it/grid/storm/persistence/model/InvalidRequestSummaryDataAttributesException.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.model; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; - -/** - * This class represents an Exception thrown when a RequestSummaryData object is created with any - * invalid attributes: null TRequestToken, null TRequestType, totalFilesInThisRequest<0, - * numOfQueuedRequests<0, numOfProgessingRequests<0, numFinished<0. - * - * @author EGRID - ICTP Trieste - * @date March 18th, 2005 - * @version 2.0 - */ -public class InvalidRequestSummaryDataAttributesException extends Exception { - - /** - * - */ - private static final long serialVersionUID = 1L; - - private final boolean nullRequestToken; - private final boolean nullRequestType; - private final boolean negTotalFilesInThisRequest; - private final boolean negNumOfQueuedRequests; - private final boolean negNumOfProgressingRequests; - private final boolean negNumFinished; - - /** - * Constructor that requires the attributes that caused the exception to be thrown. 
- */ - public InvalidRequestSummaryDataAttributesException(TRequestToken requestToken, - TRequestType requestType, int totalFilesInThisRequest, int numOfQueuedRequests, - int numOfProgressingRequests, int numFinished) { - - nullRequestToken = (requestToken == null); - nullRequestType = (requestType == null); - negTotalFilesInThisRequest = (totalFilesInThisRequest < 0); - negNumOfQueuedRequests = (numOfQueuedRequests < 0); - negNumOfProgressingRequests = (numOfProgressingRequests < 0); - negNumFinished = (numFinished < 0); - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid RequestSummaryData attributes exception: "); - sb.append("null-requestToken="); - sb.append(nullRequestToken); - sb.append("; null-requestType="); - sb.append(nullRequestType); - sb.append("; negative-totalFilesInThisRequest="); - sb.append(negTotalFilesInThisRequest); - sb.append("; negative-numOfQueuedRequests="); - sb.append(negNumOfQueuedRequests); - sb.append("; negative-numOfProgressingRequests="); - sb.append(negNumOfProgressingRequests); - sb.append("; negative-numFinished="); - sb.append(negNumFinished); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/persistence/model/JiTData.java b/src/main/java/it/grid/storm/persistence/model/JiTData.java new file mode 100644 index 000000000..83403028f --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/JiTData.java @@ -0,0 +1,71 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +/** + * Class that represents data associated to JiT entries. It contains a String representing the file, + * an int representing the ACL, an int representing the user UID, an int representing the user GID. + * + * @author EGRID - ICTP Trieste + * @version 1.0 + * @date November 2006 + */ +public class JiTData { + + private String file = ""; + private int uid = -1; + private int gid = -1; + private int acl = -1; + + /** + * Constructor requiring the complete name of the file as String, the acl as int, the uid and + * primary gid of the LocalUser bith as int. + */ + public JiTData(String file, int acl, int uid, int gid) { + + this.file = file; + this.acl = acl; + this.uid = uid; + this.gid = gid; + } + + public String pfn() { + + return file; + } + + public int acl() { + + return acl; + } + + public int uid() { + + return uid; + } + + public int gid() { + + return gid; + } + + public String toString() { + + return "file=" + file + " acl=" + acl + " uid=" + uid + " gid=" + gid; + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/PersistentChunkData.java b/src/main/java/it/grid/storm/persistence/model/PersistentChunkData.java new file mode 100644 index 000000000..16f93a792 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/PersistentChunkData.java @@ -0,0 +1,10 @@ +package it.grid.storm.persistence.model; + +public interface PersistentChunkData extends ChunkData { + + /** + * Method that returns the primary key in persistence, associated with This Chunk. 
+ */ + public long getPrimaryKey(); + +} diff --git a/src/main/java/it/grid/storm/persistence/model/PtGChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/PtGChunkDataTO.java new file mode 100644 index 000000000..749e70dcc --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/PtGChunkDataTO.java @@ -0,0 +1,312 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.srm.types.TStatusCode; +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.namespace.model.Protocol; + +import java.sql.Timestamp; +import java.util.List; + +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; + +/** + * Class that represents a row in the Persistence Layer: this is all raw data referring to the + * PtGChunkData proper, that is, String and primitive types. + * + * Each field is initialized with default values as per SRM 2.2 specification: protocolList GSIFTP + * dirOption false status SRM_REQUEST_QUEUED + * + * All other fields are 0 if int, or a white space if String. 
+ * + * @author EGRID ICTP + * @version 3.0 + * @date June 2005 + */ +public class PtGChunkDataTO { + + private static final String FQAN_SEPARATOR = "#"; + /* Database table request_Get fields BEGIN */ + private long primaryKey = -1; // ID primary key of record in DB + private boolean dirOption; // initialised in constructor + private String fromSURL = " "; + private String normalizedStFN = null; + private Integer surlUniqueID = null; + /* Database table request_Get fields END */ + + private String requestToken = " "; + private int lifetime = 0; + private boolean allLevelRecursive; // initialised in constructor + private int numLevel; // initialised in constructor + private List protocolList = null; // initialised in constructor + private long filesize = 0; + private int status; // initialised in constructor + private String errString = " "; + private String turl = " "; + private Timestamp timeStamp; + private String clientDN = null; + private String vomsAttributes = null; + + public PtGChunkDataTO() { + + TURLPrefix protocolPreferences = new TURLPrefix(); + protocolPreferences.addProtocol(Protocol.GSIFTP); + this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); + this.status = StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED); + this.dirOption = false; + // + this.allLevelRecursive = false; + this.numLevel = 0; + } + + public long primaryKey() { + + return primaryKey; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public String requestToken() { + + return requestToken; + } + + public void setRequestToken(String s) { + + requestToken = s; + } + + public Timestamp timeStamp() { + + return timeStamp; + } + + public void setTimeStamp(Timestamp timeStamp) { + + this.timeStamp = timeStamp; + } + + public String fromSURL() { + + return fromSURL; + } + + public void setFromSURL(String s) { + + fromSURL = s; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void 
setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * @param sURLUniqueID the sURLUniqueID to set + */ + public void setSurlUniqueID(Integer sURLUniqueID) { + + this.surlUniqueID = sURLUniqueID; + } + + /** + * @return the sURLUniqueID + */ + public Integer surlUniqueID() { + + return surlUniqueID; + } + + public int lifeTime() { + + return lifetime; + } + + public void setLifeTime(int n) { + + lifetime = n; + } + + public boolean dirOption() { + + return dirOption; + } + + public void setDirOption(boolean b) { + + dirOption = b; + } + + public boolean allLevelRecursive() { + + return allLevelRecursive; + } + + public void setAllLevelRecursive(boolean b) { + + allLevelRecursive = b; + } + + public int numLevel() { + + return numLevel; + } + + public void setNumLevel(int n) { + + numLevel = n; + } + + public List protocolList() { + + return protocolList; + } + + public void setProtocolList(List l) { + + if ((l != null) && (!l.isEmpty())) + protocolList = l; + } + + public long fileSize() { + + return filesize; + } + + public void setFileSize(long n) { + + filesize = n; + } + + public int status() { + + return status; + } + + public void setStatus(int n) { + + status = n; + } + + public String errString() { + + return errString; + } + + public void setErrString(String s) { + + errString = s; + } + + public String turl() { + + return turl; + } + + public void setTurl(String s) { + + turl = s; + } + + public String clientDN() { + + return clientDN; + } + + public void setClientDN(String s) { + + clientDN = s; + } + + public String vomsAttributes() { + + return vomsAttributes; + } + + public void setVomsAttributes(String s) { + + vomsAttributes = s; + } + + public void setVomsAttributes(String[] fqaNsAsString) { + + vomsAttributes = ""; + for (int i = 0; i < fqaNsAsString.length; i++) { + vomsAttributes += 
fqaNsAsString[i]; + if (i < fqaNsAsString.length - 1) { + vomsAttributes += FQAN_SEPARATOR; + } + } + + } + + public String[] vomsAttributesArray() { + + return vomsAttributes.split(FQAN_SEPARATOR); + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(requestToken); + sb.append(" "); + sb.append(fromSURL); + sb.append(" "); + sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(lifetime); + sb.append(" "); + sb.append(dirOption); + sb.append(" "); + sb.append(allLevelRecursive); + sb.append(" "); + sb.append(numLevel); + sb.append(" "); + sb.append(protocolList); + sb.append(" "); + sb.append(filesize); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + sb.append(" "); + sb.append(turl); + return sb.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/PtGChunkTO.java b/src/main/java/it/grid/storm/persistence/model/PtGChunkTO.java deleted file mode 100644 index eed4500da..000000000 --- a/src/main/java/it/grid/storm/persistence/model/PtGChunkTO.java +++ /dev/null @@ -1,508 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence.model; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.srm.types.TStorageSystemInfo; -import it.grid.storm.srm.types.TTURL; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a PrepareToGetChunkData, that is part of a multifile - * PrepareToGet srm request. It contains data about: the requestToken, the - * fromSURL and the storageSystemInfo for that SURL, the requested lifeTime of - * pinning, the requested fileStorageType and any available spaceToken, the - * TDirOption which explains whether the requested SURL is a directory and if it - * must be recursed at all levels, as well as the desired number of levels to - * recurse, the desired transferProtocols in order of preference, the fileSize, - * the estimatedTimeOnQueue, the estimatedProcessingTime, the transferURL for - * the supplied SURL, and the remainingPinTime. - * - * @author EGRID - ICTP Trieste - * @date March 21st, 2005 - * @version 2.0 - */ -public class PtGChunkTO { - - private static final Logger log = LoggerFactory.getLogger(PtGChunkTO.class); - - private TRequestToken requestToken; - - private TSURL fromSURL; - private TStorageSystemInfo storageSystemInfo; - - private TLifeTimeInSeconds lifeTime; // requested lifetime for fromSURL - - // BEWARE!!! It is the pin time!!! 
- private TFileStorageType fileStorageType; // TFileStorageType requested for - // specific fromSURL to get - private TSpaceToken spaceToken; // SpaceToken to use for fromSURL - private TDirOption dirOption; // specifies if the request regards a directory - // and related info - - private TURLPrefix transferProtocols; // list of desired transport protocols - // for fromSURL - - private TSizeInBytes fileSize; // size of file - private TReturnStatus status; // return status for this chunk of request - private TLifeTimeInSeconds estimatedWaitTimeOnQueue; // estimated time this - // chunk will remain in - // queue - private TLifeTimeInSeconds estimatedProcessingTime; // estimated time this - // chunk will take to be - // processed - private TTURL transferURL; // TURL for picking up the requested file - private TLifeTimeInSeconds remainingPinTime; // estimated time remaining for - // Pin validity - - public PtGChunkTO(TRequestToken requestToken, TSURL fromSURL, - TStorageSystemInfo storageSystemInfo, TLifeTimeInSeconds lifeTime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize, - TReturnStatus status, TLifeTimeInSeconds estimatedWaitTimeOnQueue, - TLifeTimeInSeconds estimatedProcessingTime, TTURL transferURL, - TLifeTimeInSeconds remainingPinTime) - throws InvalidPtGChunkDataAttributesException { - - boolean ok = requestToken != null && fromSURL != null - && storageSystemInfo != null && lifeTime != null - && fileStorageType != null && spaceToken != null && dirOption != null - && transferProtocols != null && fileSize != null && status != null - && estimatedWaitTimeOnQueue != null && estimatedProcessingTime != null - && transferURL != null && remainingPinTime != null; - - if (!ok) { - throw new InvalidPtGChunkDataAttributesException(requestToken, fromSURL, - storageSystemInfo, lifeTime, fileStorageType, spaceToken, dirOption, - transferProtocols, fileSize, status, 
estimatedWaitTimeOnQueue, - estimatedProcessingTime, transferURL, remainingPinTime); - } - this.requestToken = requestToken; - this.fromSURL = fromSURL; - this.storageSystemInfo = storageSystemInfo; - this.lifeTime = lifeTime; - this.fileStorageType = fileStorageType; - this.spaceToken = spaceToken; - this.dirOption = dirOption; - this.transferProtocols = transferProtocols; - this.fileSize = fileSize; - this.status = status; - this.estimatedWaitTimeOnQueue = estimatedWaitTimeOnQueue; - this.estimatedProcessingTime = estimatedProcessingTime; - this.transferURL = transferURL; - } - - /** - * Method that returns the requestToken of the srm request to which this chunk - * belongs. - */ - public TRequestToken requestToken() { - - return requestToken; - } - - /** - * Method that returns the fromSURL of the srm request to which this chunk - * belongs. - */ - public TSURL fromSURL() { - - return fromSURL; - } - - /** - * Method that returns the storageSystemInfo of the srm request to which this - * chunk belongs - */ - public TStorageSystemInfo storageSystemInfo() { - - return storageSystemInfo; - } - - /** - * Method that returns the requested pin life time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds lifeTime() { - - return lifeTime; - } - - /** - * Method that returns the filerequested pin life time for this chunk of the - * srm request. - */ - public TFileStorageType fileStorageType() { - - return fileStorageType; - } - - /** - * Method that returns the space token supplied for this chunk of the srm - * request. - */ - public TSpaceToken spaceToken() { - - return spaceToken; - } - - /** - * Method that returns the dirOption specified in the srm request. - */ - public TDirOption dirOption() { - - return dirOption; - } - - /** - * Method that returns a TURLPrefix containing the transfer protocols desired - * for this chunk of the srm request. 
- */ - public TURLPrefix transferProtocols() { - - return transferProtocols; - } - - /** - * Method that returns the status for this chunk of the srm request. - */ - public TReturnStatus status() { - - return status; - } - - /** - * Method that returns the file size for this chunk of the srm request. - */ - public TSizeInBytes fileSize() { - - return fileSize; - } - - /** - * Method that returns the estimated time in queue for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds estimatedWaitTimeOnQueue() { - - return estimatedWaitTimeOnQueue; - } - - /** - * Method that returns the estimated processing time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds estimatedProcessingTime() { - - return estimatedProcessingTime; - } - - /** - * Method that returns the TURL for this chunk of the srm request. - */ - public TTURL transferURL() { - - return transferURL; - } - - /** - * Method that returns the estimated remaining pin time for this chunk of the - * srm request. - */ - public TLifeTimeInSeconds remainingPinTime() { - - return remainingPinTime; - } - - /** - * Method that sets the status of this request to SRM_REQUEST_QUEUED; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_REQUEST_QUEUED(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_REQUEST_QUEUED, explanation); - } - - /** - * Method that sets the status of this request to SRM_DONE; it needs the - * explanation String which describes the situation in greater detail; if a - * null is passed, then an empty String is used as explanation. 
- */ - public void changeStatusSRM_DONE(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_DONE, explanation); - } - - /** - * Method that sets the status of this request to SRM_INVALID_REQUEST; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_INVALID_REQUEST(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, explanation); - } - - /** - * Method that sets the status of this request to SRM_AUTHORIZATION_FAILURE; - * it needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_AUTHORIZATION_FAILURE(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, - explanation); - } - - /** - * Method that sets the status of this request to SRM_ABORTED; it needs the - * explanation String which describes the situation in greater detail; if a - * null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_ABORTED(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_ABORTED, explanation); - } - - /** - * Method that sets the status of this request to SRM_REQUEST_INPROGRESS; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. 
- */ - public void changeStatusSRM_REQUEST_INPROGRESS(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_REQUEST_INPROGRESS, explanation); - } - - /** - * Method that sets the status of this request to SRM_INTERNAL_ERROR; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_INTERNAL_ERROR(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_INTERNAL_ERROR, explanation); - } - - /** - * Method that sets the status of this request to SRM_FATAL_INTERNAL_ERROR; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_FATAL_INTERNAL_ERROR(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_FATAL_INTERNAL_ERROR, - explanation); - } - - /** - * Method that sets the status of this request to SRM_INVALID_PATH; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. 
- */ - public void changeStatusSRM_INVALID_PATH(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_INVALID_PATH, explanation); - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("PtGChunkData\n"); - sb.append("RequestToken="); - sb.append(requestToken); - sb.append("; "); - sb.append("fromSURL="); - sb.append(fromSURL); - sb.append("; "); - sb.append("storageSystemInfo="); - sb.append(storageSystemInfo); - sb.append("; "); - sb.append("lifeTime="); - sb.append(lifeTime); - sb.append("; "); - sb.append("fileStorageType="); - sb.append(fileStorageType); - sb.append("; "); - sb.append("spaceToken"); - sb.append(spaceToken); - sb.append("; "); - sb.append("dirOption="); - sb.append(dirOption); - sb.append("; "); - sb.append("transferProtocols="); - sb.append(transferProtocols); - sb.append("; "); - sb.append("fileSize="); - sb.append(fileSize); - sb.append("; "); - sb.append("status="); - sb.append(status); - sb.append("; "); - sb.append("estimatedWaitTimeOnQueue="); - sb.append(estimatedWaitTimeOnQueue); - sb.append("; "); - sb.append("estimatedProcessingTime="); - sb.append(estimatedProcessingTime); - sb.append("; "); - sb.append("transferURL="); - sb.append(transferURL); - sb.append("; "); - sb.append("remainingPinTime="); - sb.append(remainingPinTime); - sb.append("."); - return sb.toString(); - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + requestToken.hashCode(); - hash = 37 * hash + fromSURL.hashCode(); - hash = 37 * hash + storageSystemInfo.hashCode(); - hash = 37 * hash + lifeTime.hashCode(); - hash = 37 * hash + fileStorageType.hashCode(); - hash = 37 * hash + spaceToken.hashCode(); - hash = 37 * hash + dirOption.hashCode(); - hash = 37 * hash + transferProtocols.hashCode(); - hash = 37 * hash + fileSize.hashCode(); - hash = 37 * hash + status.hashCode(); - hash = 37 * hash + 
estimatedWaitTimeOnQueue.hashCode(); - hash = 37 * hash + estimatedProcessingTime.hashCode(); - hash = 37 * hash + transferURL.hashCode(); - hash = 37 * hash + remainingPinTime.hashCode(); - return hash; - } - - @Override - public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof PtGChunkTO)) { - return false; - } - PtGChunkTO cd = (PtGChunkTO) o; - return requestToken.equals(cd.requestToken) && fromSURL.equals(cd.fromSURL) - && storageSystemInfo.equals(cd.storageSystemInfo) - && lifeTime.equals(cd.lifeTime) - && fileStorageType.equals(cd.fileStorageType) - && spaceToken.equals(cd.spaceToken) && dirOption.equals(cd.dirOption) - && transferProtocols.equals(cd.transferProtocols) - && fileSize.equals(cd.fileSize) && status.equals(cd.status) - && estimatedWaitTimeOnQueue.equals(cd.estimatedWaitTimeOnQueue) - && estimatedProcessingTime.equals(cd.estimatedProcessingTime) - && transferURL.equals(cd.transferURL) - && remainingPinTime.equals(cd.remainingPinTime); - } - - /** - * Method used to set the size of the file corresponding to the requested - * SURL. If the supplied TSizeInByte is null, the nothing gets set! - */ - public TSizeInBytes setFileSize(final TSizeInBytes size) { - - if (size != null) { - fileSize = size; - } - return null; - }; - - /** - * Method used to set the estimated time that the chunk will spend on the - * queue. If the supplied TLifeTimeInSeconds is null, then nothing gets set! - */ - public void setEstimatedWaitTimeOnQueue(final TLifeTimeInSeconds time) { - - if (time != null) { - estimatedWaitTimeOnQueue = time; - } - }; - - /** - * Method used to set the estimated time the processing will take. If the - * supplied TLifeTimeInSeconds is null, then nothing gets set! - */ - public void setEstimatedProcessingTime(final TLifeTimeInSeconds time) { - - if (time != null) { - estimatedProcessingTime = time; - } - }; - - /** - * Method used to set the transferURL associated to the SURL of this chunk. 
If - * TTURL is null, then nothing gets set! - */ - public void setTransferURL(final TTURL turl) { - - if (turl != null) { - transferURL = turl; - } - }; - - /** - * Method used in the mechanism for suspending and resuming a request. To be - * implemented! For now it always returns 0. - */ - public int getProgressCounter() { - - return 0; - }; -} diff --git a/src/main/java/it/grid/storm/persistence/model/PtGData.java b/src/main/java/it/grid/storm/persistence/model/PtGData.java new file mode 100644 index 000000000..499508bc1 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/PtGData.java @@ -0,0 +1,37 @@ +package it.grid.storm.persistence.model; + +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TSizeInBytes; + +public interface PtGData extends FileTransferData { + + /** + * Method that returns the requested pin life time for this chunk of the srm request. + */ + public TLifeTimeInSeconds getPinLifeTime(); + + /** + * Method that returns the dirOption specified in the srm request. + */ + public TDirOption getDirOption(); + + /** + * Method that returns the file size for this chunk of the srm request. + */ + public TSizeInBytes getFileSize(); + + /** + * Method used to set the size of the file corresponding to the requested SURL. If the supplied + * TSizeInByte is null, then nothing gets set! + */ + public void setFileSize(TSizeInBytes size); + + /** + * Method that sets the status of this request to SRM_FILE_PINNED; it needs the explanation String + * which describes the situation in greater detail; if a null is passed, then an empty String is + * used as explanation. 
+ */ + public void changeStatusSRM_FILE_PINNED(String explanation); + +} diff --git a/src/main/java/it/grid/storm/persistence/model/PtGPersistentChunkData.java b/src/main/java/it/grid/storm/persistence/model/PtGPersistentChunkData.java new file mode 100644 index 000000000..b6f3cd763 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/PtGPersistentChunkData.java @@ -0,0 +1,209 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGPersistentChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TStatusCode; +import it.grid.storm.srm.types.TTURL; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class represents a PrepareToGetChunkData, that is part of a multifile PrepareToGet srm + * request. It contains data about: the requestToken, the fromSURL, the requested lifeTime of + * pinning, the TDirOption which tells whether the requested SURL is a directory and if it must be + * recursed at all levels, as well as the desired number of levels to recurse, the desired + * transferProtocols in order of preference, the fileSize, and the transferURL for the supplied + * SURL. 
+ * + * @author EGRID - ICTP Trieste + * @date March 21st, 2005 + * @version 3.0 + */ +public class PtGPersistentChunkData extends IdentityPtGData implements PersistentChunkData { + + private static final Logger log = LoggerFactory.getLogger(PtGPersistentChunkData.class); + + /** + * long representing the primary key for the persistence layer, in the status_Get table + */ + private long primaryKey = -1; + + /** + * This is the requestToken of the multifile srm request to which this chunk belongs + */ + private TRequestToken requestToken; + + /** + * @param requestToken + * @param fromSURL + * @param lifeTime + * @param dirOption + * @param desiredProtocols + * @param fileSize + * @param status + * @param transferURL + * @throws InvalidPtGDataAttributesException + */ + public PtGPersistentChunkData(GridUserInterface auth, TRequestToken requestToken, TSURL fromSURL, + TLifeTimeInSeconds lifeTime, TDirOption dirOption, TURLPrefix desiredProtocols, + TSizeInBytes fileSize, TReturnStatus status, TTURL transferURL) + throws InvalidPtGDataAttributesException, InvalidPtGDataAttributesException, + InvalidFileTransferDataAttributesException, InvalidSurlRequestDataAttributesException { + + super(auth, fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, transferURL); + if (requestToken == null) { + log.debug("PtGPersistentChunkData: requestToken is null!"); + throw new InvalidPtGPersistentChunkDataAttributesException(requestToken, fromSURL, lifeTime, + dirOption, desiredProtocols, fileSize, status, transferURL); + } + + this.requestToken = requestToken; + } + + /** + * Method used to get the primary key used in the persistence layer! + */ + @Override + public long getPrimaryKey() { + + return primaryKey; + } + + /** + * Method used to set the primary key to be used in the persistence layer! + */ + public void setPrimaryKey(long l) { + + primaryKey = l; + } + + /** + * Method that returns the requestToken of the srm request to which this chunk belongs. 
+ */ + @Override + public TRequestToken getRequestToken() { + + return requestToken; + } + + /** + * Method that sets the status of this request to SRM_FILE_PINNED; it needs the explanation String + * which describes the situation in greater detail; if a null is passed, then an empty String is + * used as explanation. + */ + public void changeStatusSRM_FILE_PINNED(String explanation) { + + setStatus(TStatusCode.SRM_FILE_PINNED, explanation); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = super.hashCode(); + result = prime * result + (int) (primaryKey ^ (primaryKey >>> 32)); + result = prime * result + ((requestToken == null) ? 0 : requestToken.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + PtGPersistentChunkData other = (PtGPersistentChunkData) obj; + if (primaryKey != other.primaryKey) { + return false; + } + if (requestToken == null) { + if (other.requestToken != null) { + return false; + } + } else if (!requestToken.equals(other.requestToken)) { + return false; + } + return true; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("PtGPersistentChunkData [primaryKey="); + builder.append(primaryKey); + builder.append(", requestToken="); + builder.append(requestToken); + builder.append(", pinLifeTime="); + builder.append(pinLifeTime); + builder.append(", dirOption="); + builder.append(dirOption); + builder.append(", fileSize="); + builder.append(fileSize); + builder.append(", transferProtocols="); + builder.append(transferProtocols); + builder.append(", SURL="); + 
builder.append(SURL); + builder.append(", status="); + builder.append(status); + builder.append(", transferURL="); + builder.append(transferURL); + builder.append("]"); + return builder.toString(); + } + + @Override + public long getIdentifier() { + + return getPrimaryKey(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/PtPChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/PtPChunkDataTO.java new file mode 100644 index 000000000..91084941a --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/PtPChunkDataTO.java @@ -0,0 +1,340 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.config.Configuration; +import it.grid.storm.namespace.model.Protocol; +import it.grid.storm.persistence.converter.FileStorageTypeConverter; +import it.grid.storm.persistence.converter.OverwriteModeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.srm.types.TStatusCode; + +import java.sql.Timestamp; +import java.util.List; + +/** + * Class that represents a row in the Persistence Layer: this is all raw data referring to the + * PtPChunkData proper, that is, String and primitive types. + * + * Each field is initialized with default values as per SRM 2.2 specification: protocolList GSIFTP + * fileStorageType VOLATILE overwriteMode NEVER status SRM_REQUEST_QUEUED + * + * All other fields are 0 if int, or a white space if String. 
+ * + * @author EGRID ICTP + * @version 2.0 + * @date June 2005 + */ +public class PtPChunkDataTO { + + private static final String FQAN_SEPARATOR = "#"; + /* Database table request_Get fields BEGIN */ + private long primaryKey = -1; // ID primary key of status_Put record in DB + private String toSURL = " "; + private long expectedFileSize = 0; + private String normalizedStFN = null; + private Integer surlUniqueID = null; + /* Database table request_Get fields END */ + + private String requestToken = " "; + private int pinLifetime = -1; + private int fileLifetime = -1; + private String fileStorageType = null; // initialised in constructor + private String spaceToken = " "; + private List protocolList = null; // initialised in constructor + private String overwriteOption = null; // initialised in constructor + private int status; // initialised in constructor + private String errString = " "; + private String turl = " "; + private Timestamp timeStamp = null; + + private String clientDN = null; + private String vomsAttributes = null; + + + public PtPChunkDataTO() { + + this.fileStorageType = FileStorageTypeConverter.getInstance() + .toDB(TFileStorageType + .getTFileStorageType(Configuration.getInstance().getDefaultFileStorageType())); + TURLPrefix protocolPreferences = new TURLPrefix(); + protocolPreferences.addProtocol(Protocol.GSIFTP); + this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); + this.overwriteOption = OverwriteModeConverter.toDB(TOverwriteMode.NEVER).name(); + this.status = StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED); + } + + public long primaryKey() { + + return primaryKey; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public String requestToken() { + + return requestToken; + } + + public void setRequestToken(String s) { + + requestToken = s; + } + + public Timestamp timeStamp() { + + return timeStamp; + } + + public void setTimeStamp(Timestamp timeStamp) { + + this.timeStamp 
= timeStamp; + } + + public String toSURL() { + + return toSURL; + } + + public void setToSURL(String s) { + + toSURL = s; + } + + /** + * @return the normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the surlUniqueID + */ + public Integer surlUniqueID() { + + return surlUniqueID; + } + + /** + * @param surlUniqueID the surlUniqueID to set + */ + public void setSurlUniqueID(Integer surlUniqueID) { + + this.surlUniqueID = surlUniqueID; + } + + public int pinLifetime() { + + return pinLifetime; + } + + public void setPinLifetime(int n) { + + pinLifetime = n; + } + + public int fileLifetime() { + + return fileLifetime; + } + + public void setFileLifetime(int n) { + + fileLifetime = n; + } + + public String fileStorageType() { + + return fileStorageType; + } + + /** + * Method that sets the FileStorageType: if it is null nothing gets set. The deafult value is + * Permanent. + */ + public void setFileStorageType(String s) { + + if (s != null) + fileStorageType = s; + } + + public String spaceToken() { + + return spaceToken; + } + + public void setSpaceToken(String s) { + + spaceToken = s; + } + + public long expectedFileSize() { + + return expectedFileSize; + } + + public void setExpectedFileSize(long l) { + + expectedFileSize = l; + } + + public List protocolList() { + + return protocolList; + } + + public void setProtocolList(List l) { + + if ((l != null) && (!l.isEmpty())) + protocolList = l; + } + + public String overwriteOption() { + + return overwriteOption; + } + + /** + * Method that sets the OverwriteMode: if it is null nothing gets set. The deafult value is Never. 
+ */ + public void setOverwriteOption(String s) { + + if (s != null) + overwriteOption = s; + } + + public int status() { + + return status; + } + + public void setStatus(int n) { + + status = n; + } + + public String errString() { + + return errString; + } + + public void setErrString(String s) { + + errString = s; + } + + public String transferURL() { + + return turl; + } + + public void setTransferURL(String s) { + + turl = s; + } + + public String clientDN() { + + return clientDN; + } + + public void setClientDN(String s) { + + clientDN = s; + } + + public String vomsAttributes() { + + return vomsAttributes; + } + + public void setVomsAttributes(String s) { + + vomsAttributes = s; + } + + public void setVomsAttributes(String[] fqaNsAsString) { + + vomsAttributes = ""; + for (int i = 0; i < fqaNsAsString.length; i++) { + vomsAttributes += fqaNsAsString[i]; + if (i < fqaNsAsString.length - 1) { + vomsAttributes += FQAN_SEPARATOR; + } + } + + } + + public String[] vomsAttributesArray() { + + return vomsAttributes.split(FQAN_SEPARATOR); + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(requestToken); + sb.append(" "); + sb.append(toSURL); + sb.append(" "); + sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(pinLifetime); + sb.append(" "); + sb.append(fileLifetime); + sb.append(" "); + sb.append(fileStorageType); + sb.append(" "); + sb.append(spaceToken); + sb.append(" "); + sb.append(expectedFileSize); + sb.append(" "); + sb.append(protocolList); + sb.append(" "); + sb.append(overwriteOption); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + sb.append(" "); + sb.append(turl); + return sb.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/PtPChunkTO.java b/src/main/java/it/grid/storm/persistence/model/PtPChunkTO.java deleted file mode 100644 index b990f172b..000000000 
--- a/src/main/java/it/grid/storm/persistence/model/PtPChunkTO.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.model; - - -public class PtPChunkTO { - -} diff --git a/src/main/java/it/grid/storm/persistence/model/PtPData.java b/src/main/java/it/grid/storm/persistence/model/PtPData.java new file mode 100644 index 000000000..1abb88798 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/PtPData.java @@ -0,0 +1,55 @@ +package it.grid.storm.persistence.model; + +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TSpaceToken; + +public interface PtPData extends FileTransferData { + + /** + * Method that returns the space token supplied for this chunk of the srm request. + */ + public TSpaceToken getSpaceToken(); + + /** + * Method that returns the requested pin life time for this chunk of the srm request. + */ + public TLifeTimeInSeconds pinLifetime(); + + /** + * Method that returns the requested file life time for this chunk of the srm request. + */ + public TLifeTimeInSeconds fileLifetime(); + + /** + * Method that returns the fileStorageType for this chunk of the srm request. 
+ */ + public TFileStorageType fileStorageType(); + + /** + * Method that returns the knownSizeOfThisFile supplied with this chunk of the srm request. + */ + public TSizeInBytes expectedFileSize(); + + /** + * Method that returns the overwriteOption specified in the srm request. + */ + public TOverwriteMode overwriteOption(); + + /** + * Method that sets the status of this request to SRM_SPACE_AVAILABLE; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. + */ + public void changeStatusSRM_SPACE_AVAILABLE(String explanation); + + /** + * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. + */ + public void changeStatusSRM_DUPLICATION_ERROR(String explanation); + +} diff --git a/src/main/java/it/grid/storm/persistence/model/PtPPersistentChunkData.java b/src/main/java/it/grid/storm/persistence/model/PtPPersistentChunkData.java new file mode 100644 index 000000000..c8cfb173e --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/PtPPersistentChunkData.java @@ -0,0 +1,191 @@ +/* + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by + * applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + * OF ANY KIND, either express or implied. See the License for the specific + * language governing permissions and limitations under the License. 
+ */ + +package it.grid.storm.persistence.model; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPPersistentChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TSpaceToken; +import it.grid.storm.srm.types.TTURL; + +/** + * This class represents a PrepareToPutChunkData, that is part of a multifile PrepareToPut srm + * request. It contains data about: the requestToken, the toSURL, the requested lifeTime of pinning, + * the requested lifetime of volatile, the requested fileStorageType and any available spaceToken, + * the expectedFileSize, the desired transferProtocols in order of preference, the overwriteOption + * to be applied in case the file already exists, the transferURL for the supplied SURL. 
+ * + * @author EGRID - ICTP Trieste + * @date June, 2005 + * @version 2.0 + */ +public class PtPPersistentChunkData extends IdentityPtPData implements PersistentChunkData { + + private static final Logger log = LoggerFactory.getLogger(PtPPersistentChunkData.class); + + /** + * long representing the primary key for the persistence layer, in the status_Put table + */ + private long primaryKey = -1; + + /** + * This is the requestToken of the multifile srm request to which this chunk belongs + */ + private final TRequestToken requestToken; + + public PtPPersistentChunkData(GridUserInterface auth, TRequestToken requestToken, TSURL toSURL, + TLifeTimeInSeconds pinLifetime, TLifeTimeInSeconds fileLifetime, + TFileStorageType fileStorageType, TSpaceToken spaceToken, TSizeInBytes expectedFileSize, + TURLPrefix transferProtocols, TOverwriteMode overwriteOption, TReturnStatus status, + TTURL transferURL) + throws InvalidPtPPersistentChunkDataAttributesException, InvalidPtPDataAttributesException, + InvalidFileTransferDataAttributesException, InvalidSurlRequestDataAttributesException { + + super(auth, toSURL, pinLifetime, fileLifetime, fileStorageType, spaceToken, expectedFileSize, + transferProtocols, overwriteOption, status, transferURL); + if (requestToken == null) { + log.debug("PtPPersistentChunkData: requestToken is null!"); + throw new InvalidPtPPersistentChunkDataAttributesException(requestToken, toSURL, pinLifetime, + fileLifetime, fileStorageType, spaceToken, expectedFileSize, transferProtocols, + overwriteOption, status, transferURL); + } + this.requestToken = requestToken; + } + + /** + * Method used to get the primary key used in the persistence layer! + */ + @Override + public long getPrimaryKey() { + + return primaryKey; + } + + /** + * Method used to set the primary key to be used in the persistence layer! 
+ */ + public void setPrimaryKey(long l) { + + primaryKey = l; + } + + /** + * Method that returns the requestToken of the srm request to which this chunk belongs. + */ + @Override + public TRequestToken getRequestToken() { + + return requestToken; + } + + @Override + public long getIdentifier() { + + return getPrimaryKey(); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = super.hashCode(); + result = prime * result + (int) (primaryKey ^ (primaryKey >>> 32)); + result = prime * result + ((requestToken == null) ? 0 : requestToken.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + PtPPersistentChunkData other = (PtPPersistentChunkData) obj; + if (primaryKey != other.primaryKey) { + return false; + } + if (requestToken == null) { + if (other.requestToken != null) { + return false; + } + } else if (!requestToken.equals(other.requestToken)) { + return false; + } + return true; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("PtPPersistentChunkData [primaryKey="); + builder.append(primaryKey); + builder.append(", requestToken="); + builder.append(requestToken); + builder.append(", spaceToken="); + builder.append(spaceToken); + builder.append(", pinLifetime="); + builder.append(pinLifetime); + builder.append(", fileLifetime="); + builder.append(fileLifetime); + builder.append(", fileStorageType="); + builder.append(fileStorageType); + builder.append(", overwriteOption="); + builder.append(overwriteOption); + builder.append(", expectedFileSize="); + 
builder.append(expectedFileSize); + builder.append(", transferProtocols="); + builder.append(transferProtocols); + builder.append(", SURL="); + builder.append(SURL); + builder.append(", status="); + builder.append(status); + builder.append(", transferURL="); + builder.append(transferURL); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/RecallTaskType.java b/src/main/java/it/grid/storm/persistence/model/RecallTaskType.java deleted file mode 100644 index a2ade8cd6..000000000 --- a/src/main/java/it/grid/storm/persistence/model/RecallTaskType.java +++ /dev/null @@ -1,2 +0,0 @@ -package it.grid.storm.persistence.model; - diff --git a/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkData.java b/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkData.java new file mode 100644 index 000000000..eb0bd9342 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkData.java @@ -0,0 +1,141 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.persistence.exceptions.InvalidReducedBoLChunkDataAttributesException; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TStatusCode; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class represents a ReducedBringOnLineChunkData, that is part of a multifile PrepareToGet srm + * request. It is closely related to BoLChunkData but it is called Reduced because it only contains + * the fromSURL, the current TReturnStatus, and the primary key of the request. + * + * This class is intended to be used by srmReleaseFiles, where only a limited amunt of information + * is needed instead of full blown BoLChunkData. + * + * @author CNAF + * @date Aug 2009 + * @version 1.0 + */ +public class ReducedBoLChunkData implements ReducedChunkData { + + @SuppressWarnings("unused") + private static final Logger log = LoggerFactory.getLogger(ReducedBoLChunkData.class); + + private long primaryKey = -1; // long representing the primary key for the + // persistence layer! 
+ private TSURL fromSURL; // SURL that the srm command wants to get + private TReturnStatus status; // return status for this chunk of request + + public ReducedBoLChunkData(TSURL fromSURL, TReturnStatus status) + throws InvalidReducedBoLChunkDataAttributesException { + + boolean ok = status != null && fromSURL != null; + if (!ok) { + throw new InvalidReducedBoLChunkDataAttributesException(fromSURL, status); + } + this.fromSURL = fromSURL; + this.status = status; + } + + @Override + public boolean equals(Object o) { + + if (o == this) { + return true; + } + if (!(o instanceof ReducedBoLChunkData)) { + return false; + } + ReducedBoLChunkData cd = (ReducedBoLChunkData) o; + return (primaryKey == cd.primaryKey) && fromSURL.equals(cd.fromSURL) + && status.equals(cd.status); + } + + /** + * Method that returns the fromSURL of the srm request to which this chunk belongs. + */ + public TSURL fromSURL() { + + return fromSURL; + } + + @Override + public int hashCode() { + + int hash = 17; + hash = 37 * hash + Long.valueOf(primaryKey).hashCode(); + hash = 37 * hash + fromSURL.hashCode(); + hash = 37 * hash + status.hashCode(); + return hash; + } + + public boolean isPinned() { + + if (status.getStatusCode() == TStatusCode.SRM_SUCCESS) { + return true; + } + return false; + } + + /** + * Method used to get the primary key used in the persistence layer! + */ + public long primaryKey() { + + return primaryKey; + } + + /** + * Method used to set the primary key to be used in the persistence layer! + */ + public void setPrimaryKey(long l) { + + primaryKey = l; + } + + /** + * Method that returns the status for this chunk of the srm request. 
+ */ + public TReturnStatus status() { + + return status; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("ReducedBoLChunkData\n"); + sb.append("primaryKey="); + sb.append(primaryKey); + sb.append("; "); + sb.append("fromSURL="); + sb.append(fromSURL); + sb.append("; "); + sb.append("status="); + sb.append(status); + sb.append("."); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkDataTO.java new file mode 100644 index 000000000..96004be8e --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkDataTO.java @@ -0,0 +1,130 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.srm.types.TStatusCode; + +/** + * Class that represents some of the fields in a row in the Persistence Layer: this is all raw data + * referring to the ReducedBoLChunkData proper, that is String and primitive types. 
+ * + * @author EGRID ICTP + * @version 1.0 + * @date November, 2006 + */ +public class ReducedBoLChunkDataTO { + + private long primaryKey = -1; // ID primary key of record in DB + private String fromSURL = " "; + private String normalizedStFN = null; + private Integer surlUniqueID = null; + + private int status = StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED); + private String errString = " "; + + public String errString() { + + return errString; + } + + public String fromSURL() { + + return fromSURL; + } + + public long primaryKey() { + + return primaryKey; + } + + public void setErrString(String s) { + + errString = s; + } + + public void setFromSURL(String s) { + + fromSURL = s; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public void setStatus(int n) { + + status = n; + } + + public int status() { + + return status; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * @param surlUniqueID the sURLUniqueID to set + */ + public void setSurlUniqueID(Integer surlUniqueID) { + + this.surlUniqueID = surlUniqueID; + } + + /** + * @return the sURLUniqueID + */ + public Integer surlUniqueID() { + + return surlUniqueID; + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(fromSURL); + sb.append(" "); + sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + sb.append(" "); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedChunkData.java b/src/main/java/it/grid/storm/persistence/model/ReducedChunkData.java similarity index 79% rename from 
src/main/java/it/grid/storm/catalogs/ReducedChunkData.java rename to src/main/java/it/grid/storm/persistence/model/ReducedChunkData.java index a0a97affa..b6b0eae1f 100644 --- a/src/main/java/it/grid/storm/catalogs/ReducedChunkData.java +++ b/src/main/java/it/grid/storm/persistence/model/ReducedChunkData.java @@ -15,21 +15,21 @@ * the License. */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; public interface ReducedChunkData { - public TSURL fromSURL(); + public TSURL fromSURL(); - public boolean isPinned(); + public boolean isPinned(); - public long primaryKey(); + public long primaryKey(); - public void setPrimaryKey(long l); + public void setPrimaryKey(long l); - public TReturnStatus status(); + public TReturnStatus status(); } diff --git a/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkData.java b/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkData.java new file mode 100644 index 000000000..d77ae8072 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkData.java @@ -0,0 +1,139 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.persistence.exceptions.InvalidReducedPtGChunkDataAttributesException; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TStatusCode; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class represents a ReducedPrepareToGetChunkData, that is part of a multifile PrepareToGet + * srm request. It is closely related to PtGChunkData but it is called Reduced because it only + * contains the fromSURL, the current TReturnStatus, and the primary key of the request. + * + * This class is intended to be used by srmReleaseFiles, where only a limited amunt of information + * is needed instead of full blown PtGChunkData. + * + * @author EGRID - ICTP Trieste + * @date November, 2006 + * @version 1.0 + */ +public class ReducedPtGChunkData implements ReducedChunkData { + + @SuppressWarnings("unused") + private static final Logger log = LoggerFactory.getLogger(ReducedPtGChunkData.class); + + private long primaryKey = -1; + private TSURL fromSURL; + private TReturnStatus status; + + public ReducedPtGChunkData(TSURL fromSURL, TReturnStatus status) + throws InvalidReducedPtGChunkDataAttributesException { + + if (status == null || fromSURL == null) { + throw new InvalidReducedPtGChunkDataAttributesException(fromSURL, status); + } + this.fromSURL = fromSURL; + this.status = status; + } + + /** + * Method that returns the fromSURL of the srm request to which this chunk belongs. 
+ */ + public TSURL fromSURL() { + + return fromSURL; + } + + @Override + public int hashCode() { + + int hash = 17; + hash = 37 * hash + Long.valueOf(primaryKey).hashCode(); + hash = 37 * hash + fromSURL.hashCode(); + hash = 37 * hash + status.hashCode(); + return hash; + } + + public boolean isPinned() { + + if (status.getStatusCode() == TStatusCode.SRM_FILE_PINNED) { + return true; + } + return false; + } + + /** + * Method used to get the primary key used in the persistence layer! + */ + public long primaryKey() { + + return primaryKey; + } + + /** + * Method used to set the primary key to be used in the persistence layer! + */ + public void setPrimaryKey(long l) { + + primaryKey = l; + } + + /** + * Method that returns the status for this chunk of the srm request. + */ + public TReturnStatus status() { + + return status; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("ReducedPtGChunkData\n"); + sb.append("primaryKey="); + sb.append(primaryKey); + sb.append("; "); + sb.append("fromSURL="); + sb.append(fromSURL); + sb.append("; "); + sb.append("status="); + sb.append(status); + sb.append("."); + return sb.toString(); + } + + @Override + public boolean equals(Object o) { + + if (o == this) { + return true; + } + if (!(o instanceof ReducedPtGChunkData)) { + return false; + } + ReducedPtGChunkData cd = (ReducedPtGChunkData) o; + return (primaryKey == cd.primaryKey) && fromSURL.equals(cd.fromSURL) + && status.equals(cd.status); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkDataTO.java new file mode 100644 index 000000000..fe95415ee --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkDataTO.java @@ -0,0 +1,131 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_QUEUED; + +import it.grid.storm.persistence.converter.StatusCodeConverter; + +/** + * Class that represents some of the fileds in a row in the Persistence Layer: this is all raw data + * referring to the ReducedPtGChunkData proper, that is String and primitive types. + * + * @author EGRID ICTP + * @version 1.0 + * @date November, 2006 + */ +public class ReducedPtGChunkDataTO { + + private long primaryKey = -1; // ID primary key of record in DB + private String fromSURL = " "; + private String normalizedStFN = null; + private Integer surlUniqueID = null; + + private int status = StatusCodeConverter.getInstance().toDB(SRM_REQUEST_QUEUED); + private String errString = " "; + + public long primaryKey() { + + return primaryKey; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public String fromSURL() { + + return fromSURL; + } + + public void setFromSURL(String s) { + + fromSURL = s; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * @param surlUniqueID the sURLUniqueID to set + */ + public void setSurlUniqueID(Integer surlUniqueID) { + + 
this.surlUniqueID = surlUniqueID; + } + + /** + * @return the sURLUniqueID + */ + public Integer surlUniqueID() { + + return surlUniqueID; + } + + public int status() { + + return status; + } + + public void setStatus(int n) { + + status = n; + } + + public String errString() { + + return errString; + } + + public void setErrString(String s) { + + errString = s; + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(fromSURL); + sb.append(" "); + sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + sb.append(" "); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkData.java b/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkData.java new file mode 100644 index 000000000..6d155fa9f --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkData.java @@ -0,0 +1,167 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.persistence.exceptions.InvalidReducedPtPChunkDataAttributesException; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class represents a ReducedPrepareToPutChunkData, that is part of a multifile PrepareToPut + * srm request. It is closely related to PtPChunkData but it is called Reduced because it only + * contains the toSURL, the current TReturnStatus, the TFileStorageType, the FileLifeTime in case of + * Volatile, the VomsGridUser limited to the DN, and the primary key of the request. + * + * This class is intended to be used by srmPutDone, where only a limited amount of information is + * needed instead of full blown PtPChunkData. It is also used by the automatic handlnig of non + * invoked srmPutDone, during transition to SRM_FILE_LIFETIME_EXPIRED. + * + * @author EGRID - ICTP Trieste + * @date January, 2007 + * @version 2.0 + */ +public class ReducedPtPChunkData { + + @SuppressWarnings("unused") + private static final Logger log = LoggerFactory.getLogger(ReducedPtPChunkData.class); + + private long primaryKey = -1; // long representing the primary key for the + // persistence layer! + private TSURL toSURL; // SURL that the srm command wants to get + private TReturnStatus status; // return status for this chunk of request + private TFileStorageType fileStorageType; // fileStorageType of this shunk of + // the request + private TLifeTimeInSeconds fileLifetime; // requested lifetime for SURL in + // case of Volatile entry. 
+ + public ReducedPtPChunkData(TSURL toSURL, TReturnStatus status, TFileStorageType fileStorageType, + TLifeTimeInSeconds fileLifetime) throws InvalidReducedPtPChunkDataAttributesException { + + if (status == null || toSURL == null || fileStorageType == null || fileLifetime == null) { + throw new InvalidReducedPtPChunkDataAttributesException(toSURL, status, fileStorageType, + fileLifetime); + } + this.toSURL = toSURL; + this.status = status; + this.fileStorageType = fileStorageType; + this.fileLifetime = fileLifetime; + } + + /** + * Method used to get the primary key used in the persistence layer! + */ + public long primaryKey() { + + return primaryKey; + } + + /** + * Method used to set the primary key to be used in the persistence layer! + */ + public void setPrimaryKey(long l) { + + primaryKey = l; + } + + /** + * Method that returns the toSURL of the srm request to which this chunk belongs. + */ + public TSURL toSURL() { + + return toSURL; + } + + /** + * Method that returns the status for this chunk of the srm request. + */ + public TReturnStatus status() { + + return status; + } + + /** + * Method that returns the TFileStorageType of the srm request to which this chunk belongs. + */ + public TFileStorageType fileStorageType() { + + return fileStorageType; + } + + /** + * Method that returns the fileLifetime of the srm request to which this chunk belongs. 
+ */ + public TLifeTimeInSeconds fileLifetime() { + + return fileLifetime; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("ReducedPtPChunkData\n"); + sb.append("primaryKey="); + sb.append(primaryKey); + sb.append("; "); + sb.append("toSURL="); + sb.append(toSURL); + sb.append("; "); + sb.append("status="); + sb.append(status); + sb.append(";"); + sb.append("fileStorageType="); + sb.append(fileStorageType); + sb.append(";"); + sb.append("fileLifetime="); + sb.append(fileLifetime); + sb.append("."); + return sb.toString(); + } + + @Override + public int hashCode() { + + int hash = 17; + hash = 37 * hash + Long.valueOf(primaryKey).hashCode(); + hash = 37 * hash + toSURL.hashCode(); + hash = 37 * hash + status.hashCode(); + hash = 37 * hash + fileStorageType.hashCode(); + hash = 37 * hash + fileLifetime.hashCode(); + return hash; + } + + @Override + public boolean equals(Object o) { + + if (o == this) { + return true; + } + if (!(o instanceof ReducedPtPChunkData)) { + return false; + } + ReducedPtPChunkData cd = (ReducedPtPChunkData) o; + return (primaryKey == cd.primaryKey) && toSURL.equals(cd.toSURL) && status.equals(cd.status) + && fileStorageType.equals(cd.fileStorageType) && fileLifetime.equals(cd.fileLifetime); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkDataTO.java new file mode 100644 index 000000000..24c80221f --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkDataTO.java @@ -0,0 +1,162 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import static it.grid.storm.srm.types.TFileStorageType.VOLATILE; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_QUEUED; + +import it.grid.storm.persistence.converter.FileStorageTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; + +/** + * Class that represents some of the fields in a row in the Persistence Layer: this is all raw data + * referring to the ReducedPtPChunkData proper, that is String and primitive types. + * + * @author EGRID ICTP + * @version 1.0 + * @date January, 2007 + */ +public class ReducedPtPChunkDataTO { + + private long primaryKey = -1; // ID primary key of record in DB + private String toSURL = " "; + private String normalizedStFN = null; + private Integer surlUniqueID = null; + + private int status = StatusCodeConverter.getInstance().toDB(SRM_REQUEST_QUEUED); + private String errString = " "; + private String fileStorageType = FileStorageTypeConverter.getInstance().toDB(VOLATILE); + private int fileLifetime = -1; + + public long primaryKey() { + + return primaryKey; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public String toSURL() { + + return toSURL; + } + + public void setToSURL(String s) { + + toSURL = s; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * 
@param surlUniqueID the sURLUniqueID to set + */ + public void setSurlUniqueID(Integer surlUniqueID) { + + this.surlUniqueID = surlUniqueID; + } + + /** + * @return the sURLUniqueID + */ + public Integer surlUniqueID() { + + return surlUniqueID; + } + + public int status() { + + return status; + } + + public void setStatus(int n) { + + status = n; + } + + public String errString() { + + return errString; + } + + public void setErrString(String s) { + + errString = s; + } + + public String fileStorageType() { + + return fileStorageType; + } + + /** + * Method that sets the FileStorageType: if it is null nothing gets set. The deafult value is + * Volatile. + */ + public void setFileStorageType(String s) { + + if (s != null) + fileStorageType = s; + } + + public int fileLifetime() { + + return fileLifetime; + } + + public void setFileLifetime(int n) { + + fileLifetime = n; + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(toSURL); + sb.append(" "); + sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + sb.append(" "); + sb.append(fileStorageType); + sb.append(" "); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/RequestData.java b/src/main/java/it/grid/storm/persistence/model/RequestData.java similarity index 94% rename from src/main/java/it/grid/storm/catalogs/RequestData.java rename to src/main/java/it/grid/storm/persistence/model/RequestData.java index 5b937f891..28083638c 100644 --- a/src/main/java/it/grid/storm/catalogs/RequestData.java +++ b/src/main/java/it/grid/storm/persistence/model/RequestData.java @@ -15,7 +15,7 @@ * the License. 
*/ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TReturnStatus; @@ -23,8 +23,8 @@ import it.grid.storm.synchcall.data.InputData; /** - * Class that represents a generic chunk. It provides only one method which is - * the primary key associated ot the chunk in persistence. + * Class that represents a generic chunk. It provides only one method which is the primary key + * associated ot the chunk in persistence. * * @author EGRID - ICTP Trieste * @version 1.0 diff --git a/src/main/java/it/grid/storm/persistence/model/RequestSummaryData.java b/src/main/java/it/grid/storm/persistence/model/RequestSummaryData.java new file mode 100644 index 000000000..ceb1ce710 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/RequestSummaryData.java @@ -0,0 +1,524 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TRequestType; +import it.grid.storm.srm.types.TReturnStatus; +// import it.grid.storm.griduser.VomsGridUser; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidRequestSummaryDataAttributesException; + +/** + * This class represents the SummaryData associated with the SRM request. It contains info about: + * Primary Key of request, TRequestType, TRequestToken, VomsGridUser. + * + * @author EGRID - ICTP Trieste + * @date March 18th, 2005 + * @version 4.0 + */ +public class RequestSummaryData { + + private TRequestType requestType = null; // request type of SRM request + private TRequestToken requestToken = null; // TRequestToken of SRM request + private GridUserInterface gu = null; // VomsGridUser that issued This request + private long id = -1; // long representing This object in persistence + + private String userToken = null; + private Integer retrytime = null; + private TLifeTimeInSeconds pinLifetime = null; + private String spaceToken = null; + private TReturnStatus status = null; + private String errstring = null; + private Integer remainingTotalTime = null; + private Integer nbreqfiles = null; + private Integer numOfCompleted = null; + private TLifeTimeInSeconds fileLifetime = null; + private Integer deferredStartTime = null; + private Integer numOfWaiting = null; + private Integer numOfFailed = null; + private Integer remainingDeferredStartTime = null; + + public RequestSummaryData(TRequestType rtype, TRequestToken rtoken, GridUserInterface gu) + throws InvalidRequestSummaryDataAttributesException { + + boolean ok = rtype != null && rtoken != null && gu != null; + if (!ok) + throw new InvalidRequestSummaryDataAttributesException(rtype, rtoken, gu); + this.requestType = rtype; + this.requestToken = rtoken; + this.gu = gu; + } + + 
/** + * Method that returns the type of SRM request + */ + public TRequestType requestType() { + + return requestType; + } + + /** + * Method that returns the SRM request TRequestToken + */ + public TRequestToken requestToken() { + + return requestToken; + } + + /** + * Method that returns the VomsGridUser that issued this request + */ + public GridUserInterface gridUser() { + + return gu; + } + + /** + * Method that returns a long corresponding to the identifier of This object in persistence. + */ + public long primaryKey() { + + return id; + } + + /** + * Method used to set the log corresponding to the identifier of This object in persistence. + */ + public void setPrimaryKey(long l) { + + this.id = l; + } + + /** + * @return the userToken + */ + public String getUserToken() { + + return userToken; + } + + /** + * @return the retrytime + */ + public Integer getRetrytime() { + + return retrytime; + } + + /** + * @return the pinLifetime + */ + public TLifeTimeInSeconds getPinLifetime() { + + return pinLifetime; + } + + /** + * @return the spaceToken + */ + public String getSpaceToken() { + + return spaceToken; + } + + /** + * @return the status + */ + public TReturnStatus getStatus() { + + return status; + } + + /** + * @return the errstring + */ + public String getErrstring() { + + return errstring; + } + + /** + * @return the remainingTotalTime + */ + public Integer getRemainingTotalTime() { + + return remainingTotalTime; + } + + /** + * @return the nbreqfiles + */ + public Integer getNbreqfiles() { + + return nbreqfiles; + } + + /** + * @return the numOfCompleted + */ + public Integer getNumOfCompleted() { + + return numOfCompleted; + } + + /** + * @return the fileLifetime + */ + public TLifeTimeInSeconds getFileLifetime() { + + return fileLifetime; + } + + /** + * @return the deferredStartTime + */ + public Integer getDeferredStartTime() { + + return deferredStartTime; + } + + /** + * @return the numOfWaiting + */ + public Integer getNumOfWaiting() { + + return 
numOfWaiting; + } + + /** + * @return the numOfFailed + */ + public Integer getNumOfFailed() { + + return numOfFailed; + } + + /** + * @return the remainingDeferredStartTime + */ + public Integer getRemainingDeferredStartTime() { + + return remainingDeferredStartTime; + } + + public void setUserToken(String userToken) { + + this.userToken = userToken; + } + + public void setRetrytime(Integer retrytime) { + + this.retrytime = retrytime; + + } + + public void setPinLifetime(TLifeTimeInSeconds pinLifetime) { + + this.pinLifetime = pinLifetime; + + } + + public void setSpaceToken(String spaceToken) { + + this.spaceToken = spaceToken; + + } + + public void setStatus(TReturnStatus status) { + + this.status = status; + + } + + public void setErrstring(String errstring) { + + this.errstring = errstring; + + } + + public void setRemainingTotalTime(Integer remainingTotalTime) { + + this.remainingTotalTime = remainingTotalTime; + + } + + public void setNbreqfiles(Integer nbreqfiles) { + + this.nbreqfiles = nbreqfiles; + + } + + public void setNumOfCompleted(Integer numOfCompleted) { + + this.numOfCompleted = numOfCompleted; + + } + + public void setFileLifetime(TLifeTimeInSeconds fileLifetime) { + + this.fileLifetime = fileLifetime; + + } + + public void setDeferredStartTime(Integer deferredStartTime) { + + this.deferredStartTime = deferredStartTime; + + } + + public void setNumOfWaiting(Integer numOfWaiting) { + + this.numOfWaiting = numOfWaiting; + + } + + public void setNumOfFailed(Integer numOfFailed) { + + this.numOfFailed = numOfFailed; + + } + + public void setRemainingDeferredStartTime(Integer remainingDeferredStartTime) { + + this.remainingDeferredStartTime = remainingDeferredStartTime; + + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("RequestSummaryData [requestType="); + builder.append(requestType); + builder.append(", 
requestToken="); + builder.append(requestToken); + builder.append(", gu="); + builder.append(gu); + builder.append(", id="); + builder.append(id); + builder.append(", userToken="); + builder.append(userToken); + builder.append(", retrytime="); + builder.append(retrytime); + builder.append(", pinLifetime="); + builder.append(pinLifetime); + builder.append(", spaceToken="); + builder.append(spaceToken); + builder.append(", status="); + builder.append(status); + builder.append(", errstring="); + builder.append(errstring); + builder.append(", remainingTotalTime="); + builder.append(remainingTotalTime); + builder.append(", nbreqfiles="); + builder.append(nbreqfiles); + builder.append(", numOfCompleted="); + builder.append(numOfCompleted); + builder.append(", fileLifetime="); + builder.append(fileLifetime); + builder.append(", deferredStartTime="); + builder.append(deferredStartTime); + builder.append(", numOfWaiting="); + builder.append(numOfWaiting); + builder.append(", numOfFailed="); + builder.append(numOfFailed); + builder.append(", remainingDeferredStartTime="); + builder.append(remainingDeferredStartTime); + builder.append("]"); + return builder.toString(); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = 1; + result = prime * result + ((deferredStartTime == null) ? 0 : deferredStartTime.hashCode()); + result = prime * result + ((errstring == null) ? 0 : errstring.hashCode()); + result = prime * result + ((fileLifetime == null) ? 0 : fileLifetime.hashCode()); + result = prime * result + ((gu == null) ? 0 : gu.hashCode()); + result = prime * result + (int) (id ^ (id >>> 32)); + result = prime * result + ((nbreqfiles == null) ? 0 : nbreqfiles.hashCode()); + result = prime * result + ((numOfCompleted == null) ? 0 : numOfCompleted.hashCode()); + result = prime * result + ((numOfFailed == null) ? 
0 : numOfFailed.hashCode()); + result = prime * result + ((numOfWaiting == null) ? 0 : numOfWaiting.hashCode()); + result = prime * result + ((pinLifetime == null) ? 0 : pinLifetime.hashCode()); + result = prime * result + + ((remainingDeferredStartTime == null) ? 0 : remainingDeferredStartTime.hashCode()); + result = prime * result + ((remainingTotalTime == null) ? 0 : remainingTotalTime.hashCode()); + result = prime * result + ((requestToken == null) ? 0 : requestToken.hashCode()); + result = prime * result + ((requestType == null) ? 0 : requestType.hashCode()); + result = prime * result + ((retrytime == null) ? 0 : retrytime.hashCode()); + result = prime * result + ((spaceToken == null) ? 0 : spaceToken.hashCode()); + result = prime * result + ((status == null) ? 0 : status.hashCode()); + result = prime * result + ((userToken == null) ? 0 : userToken.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RequestSummaryData other = (RequestSummaryData) obj; + if (deferredStartTime == null) { + if (other.deferredStartTime != null) { + return false; + } + } else if (!deferredStartTime.equals(other.deferredStartTime)) { + return false; + } + if (errstring == null) { + if (other.errstring != null) { + return false; + } + } else if (!errstring.equals(other.errstring)) { + return false; + } + if (fileLifetime == null) { + if (other.fileLifetime != null) { + return false; + } + } else if (!fileLifetime.equals(other.fileLifetime)) { + return false; + } + if (gu == null) { + if (other.gu != null) { + return false; + } + } else if (!gu.equals(other.gu)) { + return false; + } + if (id != other.id) { + return false; + } + if (nbreqfiles == null) { + if (other.nbreqfiles != null) { + return false; + } + } else if 
(!nbreqfiles.equals(other.nbreqfiles)) { + return false; + } + if (numOfCompleted == null) { + if (other.numOfCompleted != null) { + return false; + } + } else if (!numOfCompleted.equals(other.numOfCompleted)) { + return false; + } + if (numOfFailed == null) { + if (other.numOfFailed != null) { + return false; + } + } else if (!numOfFailed.equals(other.numOfFailed)) { + return false; + } + if (numOfWaiting == null) { + if (other.numOfWaiting != null) { + return false; + } + } else if (!numOfWaiting.equals(other.numOfWaiting)) { + return false; + } + if (pinLifetime == null) { + if (other.pinLifetime != null) { + return false; + } + } else if (!pinLifetime.equals(other.pinLifetime)) { + return false; + } + if (remainingDeferredStartTime == null) { + if (other.remainingDeferredStartTime != null) { + return false; + } + } else if (!remainingDeferredStartTime.equals(other.remainingDeferredStartTime)) { + return false; + } + if (remainingTotalTime == null) { + if (other.remainingTotalTime != null) { + return false; + } + } else if (!remainingTotalTime.equals(other.remainingTotalTime)) { + return false; + } + if (requestToken == null) { + if (other.requestToken != null) { + return false; + } + } else if (!requestToken.equals(other.requestToken)) { + return false; + } + if (requestType != other.requestType) { + return false; + } + if (retrytime == null) { + if (other.retrytime != null) { + return false; + } + } else if (!retrytime.equals(other.retrytime)) { + return false; + } + if (spaceToken == null) { + if (other.spaceToken != null) { + return false; + } + } else if (!spaceToken.equals(other.spaceToken)) { + return false; + } + if (status == null) { + if (other.status != null) { + return false; + } + } else if (!status.equals(other.status)) { + return false; + } + if (userToken == null) { + if (other.userToken != null) { + return false; + } + } else if (!userToken.equals(other.userToken)) { + return false; + } + return true; + } + +} diff --git 
a/src/main/java/it/grid/storm/persistence/model/RequestSummaryDataTO.java b/src/main/java/it/grid/storm/persistence/model/RequestSummaryDataTO.java new file mode 100644 index 000000000..911148f38 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/RequestSummaryDataTO.java @@ -0,0 +1,533 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import java.sql.Timestamp; + +/** + * Class that represents data of an asynchronous Request, regardless of whether it is a Put, Get or + * Copy, in the Persistence Layer: this is all raw data referring to the request proper, that is, + * String and primitive types. 
+ * + * @author EGRID ICTP + * @version 2.0 + * @date June 2005 + */ +public class RequestSummaryDataTO { + + public static final String PTG_REQUEST_TYPE = "PTG"; + public static final String PTP_REQUEST_TYPE = "PTP"; + public static final String BOL_REQUEST_TYPE = "BOL"; + public static final String COPY_REQUEST_TYPE = "COP"; + + private long id = -1; // id of request in persistence + private String requestType = ""; // request type + private String requestToken = ""; // request token + private String clientDN = ""; // DN that issued request + private String vomsAttributes = ""; // String containing all VOMS attributes + private Timestamp timestamp = null; + + private boolean empty = true; + private String userToken = null; + private Integer retrytime = null; + private Integer pinLifetime = null; + private String spaceToken = null; + private Integer status = null; + private String errstring = null; + private Integer remainingTotalTime = null; + private Integer nbreqfiles = null; + private Integer numOfCompleted = null; + private Integer fileLifetime = null; + private Integer deferredStartTime = null; + private Integer numOfWaiting = null; + private Integer numOfFailed = null; + private Integer remainingDeferredStartTime = null; + + public boolean isEmpty() { + + return empty; + } + + public long primaryKey() { + + return id; + } + + public void setPrimaryKey(long l) { + + empty = false; + id = l; + } + + public String requestType() { + + return requestType; + } + + public void setRequestType(String s) { + + empty = false; + requestType = s; + } + + public String requestToken() { + + return requestToken; + } + + public void setRequestToken(String s) { + + empty = false; + requestToken = s; + } + + public String clientDN() { + + return clientDN; + } + + public void setClientDN(String s) { + + empty = false; + clientDN = s; + } + + public String vomsAttributes() { + + return vomsAttributes; + } + + public void setVomsAttributes(String s) { + + empty = false; + 
vomsAttributes = s; + } + + public Timestamp timestamp() { + + return timestamp; + } + + public void setTimestamp(Timestamp timestamp) { + + empty = false; + this.timestamp = timestamp; + } + + /** + * @return the userToken + */ + public String getUserToken() { + + return userToken; + } + + /** + * @return the retrytime + */ + public Integer getRetrytime() { + + return retrytime; + } + + /** + * @return the pinLifetime + */ + public Integer getPinLifetime() { + + return pinLifetime; + } + + /** + * @return the spaceToken + */ + public String getSpaceToken() { + + return spaceToken; + } + + /** + * @return the status + */ + public Integer getStatus() { + + return status; + } + + /** + * @return the errstring + */ + public String getErrstring() { + + return errstring; + } + + /** + * @return the remainingTotalTime + */ + public Integer getRemainingTotalTime() { + + return remainingTotalTime; + } + + /** + * @return the nbreqfiles + */ + public Integer getNbreqfiles() { + + return nbreqfiles; + } + + /** + * @return the numOfCompleted + */ + public Integer getNumOfCompleted() { + + return numOfCompleted; + } + + /** + * @return the fileLifetime + */ + public Integer getFileLifetime() { + + return fileLifetime; + } + + /** + * @return the deferredStartTime + */ + public Integer getDeferredStartTime() { + + return deferredStartTime; + } + + /** + * @return the numOfWaiting + */ + public Integer getNumOfWaiting() { + + return numOfWaiting; + } + + /** + * @return the numOfFailed + */ + public Integer getNumOfFailed() { + + return numOfFailed; + } + + /** + * @return the remainingDeferredStartTime + */ + public Integer getRemainingDeferredStartTime() { + + return remainingDeferredStartTime; + } + + public void setUserToken(String userToken) { + + this.userToken = userToken; + } + + public void setRetrytime(Integer retrytime) { + + this.retrytime = retrytime; + + } + + public void setPinLifetime(Integer pinLifetime) { + + this.pinLifetime = pinLifetime; + + } + + public 
void setSpaceToken(String spaceToken) { + + this.spaceToken = spaceToken; + + } + + public void setStatus(Integer status) { + + this.status = status; + + } + + public void setErrstring(String errstring) { + + this.errstring = errstring; + + } + + public void setRemainingTotalTime(Integer remainingTotalTime) { + + this.remainingTotalTime = remainingTotalTime; + + } + + public void setNbreqfiles(Integer nbreqfiles) { + + this.nbreqfiles = nbreqfiles; + + } + + public void setNumOfCompleted(Integer numOfCompleted) { + + this.numOfCompleted = numOfCompleted; + + } + + public void setFileLifetime(Integer fileLifetime) { + + this.fileLifetime = fileLifetime; + + } + + public void setDeferredStartTime(Integer deferredStartTime) { + + this.deferredStartTime = deferredStartTime; + + } + + public void setNumOfWaiting(Integer numOfWaiting) { + + this.numOfWaiting = numOfWaiting; + + } + + public void setNumOfFailed(Integer numOfFailed) { + + this.numOfFailed = numOfFailed; + + } + + public void setRemainingDeferredStartTime(Integer remainingDeferredStartTime) { + + this.remainingDeferredStartTime = remainingDeferredStartTime; + + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("RequestSummaryDataTO [id="); + builder.append(id); + builder.append(", requestType="); + builder.append(requestType); + builder.append(", requestToken="); + builder.append(requestToken); + builder.append(", clientDN="); + builder.append(clientDN); + builder.append(", vomsAttributes="); + builder.append(vomsAttributes); + builder.append(", timestamp="); + builder.append(timestamp); + builder.append(", empty="); + builder.append(empty); + builder.append(", userToken="); + builder.append(userToken); + builder.append(", retrytime="); + builder.append(retrytime); + builder.append(", pinLifetime="); + builder.append(pinLifetime); + builder.append(", spaceToken="); + 
builder.append(spaceToken); + builder.append(", status="); + builder.append(status); + builder.append(", errstring="); + builder.append(errstring); + builder.append(", remainingTotalTime="); + builder.append(remainingTotalTime); + builder.append(", nbreqfiles="); + builder.append(nbreqfiles); + builder.append(", numOfCompleted="); + builder.append(numOfCompleted); + builder.append(", fileLifetime="); + builder.append(fileLifetime); + builder.append(", deferredStartTime="); + builder.append(deferredStartTime); + builder.append(", numOfWaiting="); + builder.append(numOfWaiting); + builder.append(", numOfFailed="); + builder.append(numOfFailed); + builder.append(", remainingDeferredStartTime="); + builder.append(remainingDeferredStartTime); + builder.append("]"); + return builder.toString(); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = 1; + result = prime * result + ((clientDN == null) ? 0 : clientDN.hashCode()); + result = prime * result + (int) (deferredStartTime ^ (deferredStartTime >>> 32)); + result = prime * result + (empty ? 1231 : 1237); + result = prime * result + ((errstring == null) ? 0 : errstring.hashCode()); + result = prime * result + (int) (fileLifetime ^ (fileLifetime >>> 32)); + result = prime * result + (int) (id ^ (id >>> 32)); + result = prime * result + (int) (nbreqfiles ^ (nbreqfiles >>> 32)); + result = prime * result + (int) (numOfCompleted ^ (numOfCompleted >>> 32)); + result = prime * result + (int) (numOfFailed ^ (numOfFailed >>> 32)); + result = prime * result + (int) (numOfWaiting ^ (numOfWaiting >>> 32)); + result = prime * result + (int) (pinLifetime ^ (pinLifetime >>> 32)); + result = + prime * result + (int) (remainingDeferredStartTime ^ (remainingDeferredStartTime >>> 32)); + result = prime * result + (int) (remainingTotalTime ^ (remainingTotalTime >>> 32)); + result = prime * result + ((requestToken == null) ? 
0 : requestToken.hashCode()); + result = prime * result + ((requestType == null) ? 0 : requestType.hashCode()); + result = prime * result + (int) (retrytime ^ (retrytime >>> 32)); + result = prime * result + ((spaceToken == null) ? 0 : spaceToken.hashCode()); + result = prime * result + (int) (status ^ (status >>> 32)); + result = prime * result + ((timestamp == null) ? 0 : timestamp.hashCode()); + result = prime * result + ((userToken == null) ? 0 : userToken.hashCode()); + result = prime * result + ((vomsAttributes == null) ? 0 : vomsAttributes.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RequestSummaryDataTO other = (RequestSummaryDataTO) obj; + if (clientDN == null) { + if (other.clientDN != null) { + return false; + } + } else if (!clientDN.equals(other.clientDN)) { + return false; + } + if (deferredStartTime != other.deferredStartTime) { + return false; + } + if (empty != other.empty) { + return false; + } + if (errstring == null) { + if (other.errstring != null) { + return false; + } + } else if (!errstring.equals(other.errstring)) { + return false; + } + if (fileLifetime != other.fileLifetime) { + return false; + } + if (id != other.id) { + return false; + } + if (nbreqfiles != other.nbreqfiles) { + return false; + } + if (numOfCompleted != other.numOfCompleted) { + return false; + } + if (numOfFailed != other.numOfFailed) { + return false; + } + if (numOfWaiting != other.numOfWaiting) { + return false; + } + if (pinLifetime != other.pinLifetime) { + return false; + } + if (remainingDeferredStartTime != other.remainingDeferredStartTime) { + return false; + } + if (remainingTotalTime != other.remainingTotalTime) { + return false; + } + if (requestToken == null) { + if (other.requestToken != null) 
{ + return false; + } + } else if (!requestToken.equals(other.requestToken)) { + return false; + } + if (requestType == null) { + if (other.requestType != null) { + return false; + } + } else if (!requestType.equals(other.requestType)) { + return false; + } + if (retrytime != other.retrytime) { + return false; + } + if (spaceToken == null) { + if (other.spaceToken != null) { + return false; + } + } else if (!spaceToken.equals(other.spaceToken)) { + return false; + } + if (status != other.status) { + return false; + } + if (timestamp == null) { + if (other.timestamp != null) { + return false; + } + } else if (!timestamp.equals(other.timestamp)) { + return false; + } + if (userToken == null) { + if (other.userToken != null) { + return false; + } + } else if (!userToken.equals(other.userToken)) { + return false; + } + if (vomsAttributes == null) { + if (other.vomsAttributes != null) { + return false; + } + } else if (!vomsAttributes.equals(other.vomsAttributes)) { + return false; + } + return true; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/RequestSummaryTO.java b/src/main/java/it/grid/storm/persistence/model/RequestSummaryTO.java deleted file mode 100644 index 5855875c4..000000000 --- a/src/main/java/it/grid/storm/persistence/model/RequestSummaryTO.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence.model; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; - -/** - * This class represents the SummaryData associated with the SRM request, that - * is it contains info about: TRequestToken, TRequsetType, total files in this - * request, number of files in queue, number of files progressing, number of - * files finished, and whether the request is currently suspended. - * - * @author EGRID - ICTP Trieste - * @date March 18th, 2005 - * @version 3.0 - */ -public class RequestSummaryTO { - - private TRequestToken requestToken = null; // TRequestToken of SRM request - private TRequestType requestType = null; // request type of SRM request - private int totalFilesInThisRequest = 0; // total number of files in SRM - // request - private int numOfQueuedRequests = 0; // number of files in SRM request that - // are in queue - private int numOfProgressingRequests = 0; // number of files in SRM request - // that are still in progress - private int numFinished = 0; // number of files in SRM request whose - // processing has completed - private boolean isSuspended = false; // flag that indicates whether the SRM - // request is suspended - - public RequestSummaryTO(TRequestToken requestToken, TRequestType requestType, - int totalFilesInThisRequest, int numOfQueuedRequests, - int numOfProgressingRequests, int numFinished, boolean isSuspended) - throws InvalidRequestSummaryDataAttributesException { - - boolean ok = requestToken != null && requestType != null - && totalFilesInThisRequest >= 0 && numOfQueuedRequests >= 0 - && numOfProgressingRequests >= 0 && numFinished >= 0; - if (!ok) - throw new InvalidRequestSummaryDataAttributesException(requestToken, - requestType, totalFilesInThisRequest, numOfQueuedRequests, - numOfProgressingRequests, numFinished); - this.requestToken = requestToken; - this.requestType = requestType; - this.totalFilesInThisRequest = totalFilesInThisRequest; - 
this.numOfQueuedRequests = numOfQueuedRequests; - this.numOfProgressingRequests = numOfProgressingRequests; - this.numFinished = numFinished; - this.isSuspended = isSuspended; - } - - /** - * Method that returns the SRM request TRequestToken - */ - public TRequestToken requestToken() { - - return requestToken; - } - - /** - * Method that returns the type of SRM request - */ - public TRequestType requestType() { - - return requestType; - } - - /** - * Method that returns the total number of files in the SRM request - */ - public int totalFilesInThisRequest() { - - return totalFilesInThisRequest; - } - - /** - * Method that returns the number of files in the SRM request that are - * currently in queue. - */ - public int numOfQueuedRequests() { - - return numOfQueuedRequests; - } - - /** - * Method that returns the number of files in the SRM request that are - * currently in progress. - */ - public int numOfProgressingRequests() { - - return numOfProgressingRequests; - } - - /** - * Method that returns the number of files in the SRM request that are - * currently finished. - */ - public int numFinished() { - - return numFinished; - } - - /** - * Method that tells whether the SRM requst is suspended. - */ - public boolean isSuspended() { - - return isSuspended; - } - - /** - * Method that increments the counter for the number of files in queue. - */ - public void incNumOfQueuedRequests() { - - numOfQueuedRequests++; - } - - /** - * Methos used to decrement the counter fo the number of files in queue. - */ - public void decNumOfQueuedRequests() { - - numOfQueuedRequests--; - } - - /** - * Method used to increment the counter for the number of progressing - * requests. - */ - public void incNumOfProgressingRequests() { - - numOfProgressingRequests++; - } - - /** - * Method used to decrement the counter for the number of progressing - * requests. 
- */ - public void decNumOfProgressingRequests() { - - numOfProgressingRequests--; - } - - /** - * Method used to increment the counter for the number of total files in the - * request. - */ - public void incTotalFilesInThisRequest() { - - totalFilesInThisRequest++; - } - - /** - * Method used to decrement the counter fot the number of total files in this - * request. - */ - public void decTotalFilesInThisRequest() { - - totalFilesInThisRequest--; - } - - /** - * Method used to increment the counter for the processing of files that are - * currently finished. - */ - public void incNumFinished() { - - numFinished++; - } - - /** - * Method used to decrement the counter that keeps track of the number of - * files that are currently finished. - */ - public void decNumFinished() { - - numFinished--; - } - - /** - * Method used to set the SRM flag that signals the processing of the request - * this RequestSummaryData applies to, is suspended. - */ - public void srmSuspend() { - - isSuspended = true; - } - - /** - * Method used to set the SRM flag that signals the procesing of the request - * this RequestSummaryData applies to, is _not_ suspended - */ - public void srmUnSuspend() { - - isSuspended = false; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("SummaryRequestData"); - sb.append("; requestToken="); - sb.append(requestToken); - sb.append("; requestType="); - sb.append(requestType); - sb.append("; totalFilesInThisRequest="); - sb.append(totalFilesInThisRequest); - sb.append("; numOfQueuedRequests="); - sb.append(numOfQueuedRequests); - sb.append("; numOfProgressingRequests="); - sb.append(numOfProgressingRequests); - sb.append("; numFinished="); - sb.append(numFinished); - sb.append("; isSuspended="); - sb.append(isSuspended); - sb.append("."); - return sb.toString(); - } - - public int hashCode() { - - int hash = 17; - hash = 37 * hash + requestToken.hashCode(); - hash = 37 * hash + requestType.hashCode(); - hash = 37 * 
hash + totalFilesInThisRequest; - hash = 37 * hash + numOfQueuedRequests; - hash = 37 * hash + numOfProgressingRequests; - hash = 37 * hash + numFinished; - hash = (isSuspended) ? (37 * hash + 1) : (37 * hash + 0); - return hash; - } - - public boolean equals(Object o) { - - if (o == this) - return true; - if (!(o instanceof RequestSummaryTO)) - return false; - RequestSummaryTO rsd = (RequestSummaryTO) o; - return requestToken.equals(rsd.requestToken) - && requestType.equals(rsd.requestType) - && (totalFilesInThisRequest == rsd.totalFilesInThisRequest) - && (numOfQueuedRequests == rsd.numOfQueuedRequests) - && (numOfProgressingRequests == rsd.numOfProgressingRequests) - && (numFinished == rsd.numFinished) && (isSuspended == rsd.isSuspended); - } -} diff --git a/src/main/java/it/grid/storm/persistence/model/ResourceRuleData.java b/src/main/java/it/grid/storm/persistence/model/ResourceRuleData.java deleted file mode 100644 index f517b4545..000000000 --- a/src/main/java/it/grid/storm/persistence/model/ResourceRuleData.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence.model; - -public class ResourceRuleData { -} diff --git a/src/main/java/it/grid/storm/persistence/model/SQLHelper.java b/src/main/java/it/grid/storm/persistence/model/SQLHelper.java new file mode 100644 index 000000000..ed2797a67 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/SQLHelper.java @@ -0,0 +1,72 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.persistence.util.helper.MySqlFormat; +import it.grid.storm.persistence.util.helper.SQLFormat; + +public abstract class SQLHelper { + + private final SQLFormat formatter = new MySqlFormat(); + + public String format(Object value) { + + return formatter.format(value); + } + + /** + * + * @param value boolean + * @return String + */ + public String format(boolean value) { + + return formatter.format(Boolean.valueOf(value)); + } + + /** + * + * @param value int + * @return String + */ + public String format(int value) throws NumberFormatException { + + return formatter.format(Integer.valueOf(value)); + } + + /** + * + * @param value long + * @return String + */ + public String format(long value) throws NumberFormatException { + + return formatter.format(Long.valueOf(value)); + } + + /** + * + * @param date Date + * @return String + */ + public String format(java.util.Date date) { + + return formatter.format(date); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/StorageSpaceTO.java b/src/main/java/it/grid/storm/persistence/model/StorageSpaceTO.java index dd2e68809..6c640d559 100644 --- a/src/main/java/it/grid/storm/persistence/model/StorageSpaceTO.java +++ b/src/main/java/it/grid/storm/persistence/model/StorageSpaceTO.java @@ -43,475 +43,466 @@ */ public class StorageSpaceTO implements Serializable, Comparable { - private static final long serialVersionUID = -87317982494792808L; - - private static final Logger log = LoggerFactory - .getLogger(StorageSpaceTO.class); - - // ----- PRIMARY KEY ----// - private Long storageSpaceId = null; // Persistence Object IDentifier - - // ----- FIELDS ----// - private String ownerName = null; - private String voName = null; - private String spaceType = null; // `SPACE_TYPE` VARCHAR(10) NOT NULL default - // '' - private String alias = null; - private String spaceToken = null; - private String spaceFile = null; // `SPACE_FILE` VARCHAR(145) NOT 
NULL default - // '' - private long lifetime = -1L; // `LIFETIME` bigint(20) default NULL - private String storageInfo = null;// `STORAGE_INFO` VARCHAR(255) default NULL - private Date created = new Date(); - - private long totalSize = 0L; // `TOTAL_SIZE` bigint(20) NOT NULL default '0' - private long guaranteedSize = 0L; // `GUAR_SIZE` bigint(20) NOT NULL default - // '0' - private long freeSize = 0L; // `FREE_SIZE` bigint(20) default NULL - - private long usedSize = -1L; // `USED_SIZE` bigint(20) NOT NULL default '-1' - private long busySize = -1L; // `BUSY_SIZE` bigint(20) NOT NULL default '-1' - private long unavailableSize = -1L; // `UNAVAILABLE_SIZE` bigint(20) NOT NULL - // default '-1' - private long availableSize = -1L; // `AVAILABLE_SIZE` bigint(20) NOT NULL - // default '-1' - private long reservedSize = -1L; // `RESERVED_SIZE` bigint(20) NOT NULL - // default '-1' - private Date updateTime = null; - - // ********************** Constructor methods ********************** // - - /** - * No-arg constructor for JavaBean tools. 
- */ - public StorageSpaceTO() { - - super(); - } - - /** - * Constructor from Domain Object StorageSpaceData - * - * @param spaceData - * SpaceData - */ - public StorageSpaceTO(StorageSpaceData spaceData) { - - if (spaceData != null) { - log.debug("Building StorageSpaceTO with {}" , spaceData); - if (spaceData.getOwner() != null) { - ownerName = spaceData.getOwner().getDn(); - voName = getVOName(spaceData.getOwner()); - } - if (spaceData.getSpaceType() != null) { - spaceType = (spaceData.getSpaceType()).getValue(); - } - alias = spaceData.getSpaceTokenAlias(); - if (spaceData.getSpaceToken() != null) { - spaceToken = spaceData.getSpaceToken().getValue(); - } - spaceFile = spaceData.getSpaceFileNameString(); - if (spaceData.getTotalSpaceSize() != null) { - totalSize = spaceData.getTotalSpaceSize().value(); - } - if (spaceData.getTotalGuaranteedSize() != null) { - guaranteedSize = spaceData.getTotalGuaranteedSize().value(); - } - if (spaceData.getAvailableSpaceSize() != null) { - availableSize = spaceData.getAvailableSpaceSize().value(); - } - if (spaceData.getUsedSpaceSize() != null) { - usedSize = spaceData.getUsedSpaceSize().value(); - } - if (spaceData.getFreeSpaceSize() != null) { - freeSize = spaceData.getFreeSpaceSize().value(); - } - if (spaceData.getUnavailableSpaceSize() != null) { - unavailableSize = spaceData.getUnavailableSpaceSize().value(); - } - if (spaceData.getBusySpaceSize() != null) { - busySize = spaceData.getBusySpaceSize().value(); - } - if (spaceData.getReservedSpaceSize() != null) { - reservedSize = spaceData.getReservedSpaceSize().value(); - } - if (spaceData.getLifeTime() != null) { - lifetime = spaceData.getLifeTime().value(); - } - if (spaceData.getStorageInfo() != null) { - storageInfo = spaceData.getStorageInfo().getValue(); - } - if (spaceData.getCreationDate() != null) { - created = spaceData.getCreationDate(); - } - } - } - - // ************ HELPER Method *************** // - private String getVOName(GridUserInterface maker) { + 
private static final long serialVersionUID = -87317982494792808L; + + private static final Logger log = LoggerFactory.getLogger(StorageSpaceTO.class); + + // ----- PRIMARY KEY ----// + private Long storageSpaceId = null; // Persistence Object IDentifier + + // ----- FIELDS ----// + private String ownerName = null; + private String voName = null; + private String spaceType = null; // `SPACE_TYPE` VARCHAR(10) NOT NULL default + // '' + private String alias = null; + private String spaceToken = null; + private String spaceFile = null; // `SPACE_FILE` VARCHAR(145) NOT NULL default + // '' + private long lifetime = -1L; // `LIFETIME` bigint(20) default NULL + private String storageInfo = null;// `STORAGE_INFO` VARCHAR(255) default NULL + private Date created = new Date(); + + private long totalSize = 0L; // `TOTAL_SIZE` bigint(20) NOT NULL default '0' + private long guaranteedSize = 0L; // `GUAR_SIZE` bigint(20) NOT NULL default + // '0' + private long freeSize = 0L; // `FREE_SIZE` bigint(20) default NULL + + private long usedSize = -1L; // `USED_SIZE` bigint(20) NOT NULL default '-1' + private long busySize = -1L; // `BUSY_SIZE` bigint(20) NOT NULL default '-1' + private long unavailableSize = -1L; // `UNAVAILABLE_SIZE` bigint(20) NOT NULL + // default '-1' + private long availableSize = -1L; // `AVAILABLE_SIZE` bigint(20) NOT NULL + // default '-1' + private long reservedSize = -1L; // `RESERVED_SIZE` bigint(20) NOT NULL + // default '-1' + private Date updateTime = null; + + // ********************** Constructor methods ********************** // + + /** + * No-arg constructor for JavaBean tools. 
+ */ + public StorageSpaceTO() { + + super(); + } + + /** + * Constructor from Domain Object StorageSpaceData + * + * @param spaceData SpaceData + */ + public StorageSpaceTO(StorageSpaceData spaceData) { + + if (spaceData != null) { + log.debug("Building StorageSpaceTO with {}", spaceData); + if (spaceData.getOwner() != null) { + ownerName = spaceData.getOwner().getDn(); + voName = getVOName(spaceData.getOwner()); + } + if (spaceData.getSpaceType() != null) { + spaceType = (spaceData.getSpaceType()).getValue(); + } + alias = spaceData.getSpaceTokenAlias(); + if (spaceData.getSpaceToken() != null) { + spaceToken = spaceData.getSpaceToken().getValue(); + } + spaceFile = spaceData.getSpaceFileNameString(); + if (spaceData.getTotalSpaceSize() != null) { + totalSize = spaceData.getTotalSpaceSize().value(); + } + if (spaceData.getTotalGuaranteedSize() != null) { + guaranteedSize = spaceData.getTotalGuaranteedSize().value(); + } + if (spaceData.getAvailableSpaceSize() != null) { + availableSize = spaceData.getAvailableSpaceSize().value(); + } + if (spaceData.getUsedSpaceSize() != null) { + usedSize = spaceData.getUsedSpaceSize().value(); + } + if (spaceData.getFreeSpaceSize() != null) { + freeSize = spaceData.getFreeSpaceSize().value(); + } + if (spaceData.getUnavailableSpaceSize() != null) { + unavailableSize = spaceData.getUnavailableSpaceSize().value(); + } + if (spaceData.getBusySpaceSize() != null) { + busySize = spaceData.getBusySpaceSize().value(); + } + if (spaceData.getReservedSpaceSize() != null) { + reservedSize = spaceData.getReservedSpaceSize().value(); + } + if (spaceData.getLifeTime() != null) { + lifetime = spaceData.getLifeTime().value(); + } + if (spaceData.getStorageInfo() != null) { + storageInfo = spaceData.getStorageInfo().getValue(); + } + if (spaceData.getCreationDate() != null) { + created = spaceData.getCreationDate(); + } + } + } - String voStr = VO.makeNoVo().getValue(); - if (maker instanceof AbstractGridUser) { - voStr = ((AbstractGridUser) 
maker).getVO().getValue(); - } - return voStr; - } + // ************ HELPER Method *************** // + private String getVOName(GridUserInterface maker) { - // ********************** Accessor Methods ********************** // + String voStr = VO.makeNoVo().getValue(); + if (maker instanceof AbstractGridUser) { + voStr = ((AbstractGridUser) maker).getVO().getValue(); + } + return voStr; + } - public Long getStorageSpaceId() { + // ********************** Accessor Methods ********************** // - return storageSpaceId; - } + public Long getStorageSpaceId() { - public void setStorageSpaceId(Long id) { + return storageSpaceId; + } - storageSpaceId = id; - } + public void setStorageSpaceId(Long id) { - // ------------------------------------- + storageSpaceId = id; + } - public String getOwnerName() { + // ------------------------------------- - return ownerName; - } + public String getOwnerName() { - public void setOwnerName(String ownerName) { + return ownerName; + } - this.ownerName = ownerName; - } + public void setOwnerName(String ownerName) { - // ------------------------------------- + this.ownerName = ownerName; + } - public String getVoName() { + // ------------------------------------- - return voName; - } + public String getVoName() { - public void setVoName(String voName) { + return voName; + } - this.voName = voName; - } + public void setVoName(String voName) { - // ------------------------------------- + this.voName = voName; + } - public String getSpaceType() { + // ------------------------------------- - return spaceType; - } + public String getSpaceType() { - public void setSpaceType(String spaceType) { + return spaceType; + } - this.spaceType = spaceType; - } + public void setSpaceType(String spaceType) { - // ------------------------------------- + this.spaceType = spaceType; + } - public long getGuaranteedSize() { + // ------------------------------------- - return guaranteedSize; - } + public long getGuaranteedSize() { - public void 
setGuaranteedSize(long guaranteedSize) { + return guaranteedSize; + } - this.guaranteedSize = guaranteedSize; - } + public void setGuaranteedSize(long guaranteedSize) { - // ------------------------------------- + this.guaranteedSize = guaranteedSize; + } - public long getTotalSize() { + // ------------------------------------- - return totalSize; - } + public long getTotalSize() { - public void setTotalSize(long totalSize) { + return totalSize; + } - this.totalSize = totalSize; - } + public void setTotalSize(long totalSize) { - // ------------------------------------- + this.totalSize = totalSize; + } - public void setSpaceToken(String spaceToken) { + // ------------------------------------- - this.spaceToken = spaceToken; - } + public void setSpaceToken(String spaceToken) { - public String getSpaceToken() { + this.spaceToken = spaceToken; + } - return spaceToken; - } + public String getSpaceToken() { - // ------------------------------------- + return spaceToken; + } - public void setAlias(String alias) { + // ------------------------------------- - this.alias = alias; - } + public void setAlias(String alias) { - public String getAlias() { + this.alias = alias; + } - return alias; - } + public String getAlias() { - // ------------------------------------- + return alias; + } - public void setSpaceFile(String spaceFile) { + // ------------------------------------- - this.spaceFile = spaceFile; - } + public void setSpaceFile(String spaceFile) { - public String getSpaceFile() { + this.spaceFile = spaceFile; + } - return spaceFile; - } + public String getSpaceFile() { - // ------------------------------------- + return spaceFile; + } - public long getLifetime() { + // ------------------------------------- - return lifetime; - } + public long getLifetime() { - public void setLifetime(long lifetime) { + return lifetime; + } - this.lifetime = lifetime; - } + public void setLifetime(long lifetime) { - // ------------------------------------- + this.lifetime = lifetime; + 
} - public String getStorageInfo() { + // ------------------------------------- - return storageInfo; - } + public String getStorageInfo() { - public void setStorageInfo(String storageInfo) { + return storageInfo; + } - this.storageInfo = storageInfo; - } + public void setStorageInfo(String storageInfo) { - // ------------------------------------- + this.storageInfo = storageInfo; + } - public Date getCreated() { + // ------------------------------------- - return created; - } + public Date getCreated() { - public void setCreated(Date date) { + return created; + } - created = date; - } + public void setCreated(Date date) { - // ------------------------------------- + created = date; + } - /** - * @return the freeSize - */ - public final long getFreeSize() { + // ------------------------------------- - return freeSize; - } + /** + * @return the freeSize + */ + public final long getFreeSize() { - /** - * @param freeSize - * the freeSize to set - */ - public final void setFreeSize(long freeSize) { + return freeSize; + } - this.freeSize = freeSize; - } + /** + * @param freeSize the freeSize to set + */ + public final void setFreeSize(long freeSize) { - /** - * @return the usedSize - */ - public final long getUsedSize() { + this.freeSize = freeSize; + } - return usedSize; - } + /** + * @return the usedSize + */ + public final long getUsedSize() { - /** - * @param usedSize - * the usedSize to set - */ - public final void setUsedSize(long usedSize) { + return usedSize; + } - this.usedSize = usedSize; - } + /** + * @param usedSize the usedSize to set + */ + public final void setUsedSize(long usedSize) { - /** - * @return the busySize - */ - public final long getBusySize() { + this.usedSize = usedSize; + } - return busySize; - } + /** + * @return the busySize + */ + public final long getBusySize() { - /** - * @param busySize - * the busySize to set - */ - public final void setBusySize(long busySize) { + return busySize; + } - this.busySize = busySize; - } + /** + * @param 
busySize the busySize to set + */ + public final void setBusySize(long busySize) { - /** - * @return the unavailableSize - */ - public final long getUnavailableSize() { + this.busySize = busySize; + } - return unavailableSize; - } + /** + * @return the unavailableSize + */ + public final long getUnavailableSize() { - /** - * @param unavailableSize - * the unavailableSize to set - */ - public final void setUnavailableSize(long unavailableSize) { + return unavailableSize; + } - this.unavailableSize = unavailableSize; - } - - /** - * @return the reservedSize - */ - public final long getReservedSize() { - - return reservedSize; - } - - /** - * @param reservedSize - * the reservedSize to set - */ - public final void setReservedSize(long reservedSize) { - - this.reservedSize = reservedSize; - } - - /** - * @param availableSize - * the availableSize to set - */ - public void setAvailableSize(long availableSize) { - - this.availableSize = availableSize; - } - - /** - * @return the availableSize - */ - public long getAvailableSize() { - - return availableSize; - } - - // ********************** Common Methods ********************** // - - /** - * @param updateTime - * the updateTime to set - */ - public void setUpdateTime(Date updateTime) { - - this.updateTime = updateTime; - } - - /** - * @return the updateTime - */ - public Date getUpdateTime() { - - return updateTime; - } - - @Override - public boolean equals(Object o) { - - if (o == null) { - return false; - } - if (o instanceof StorageSpaceTO) { - if (this == o) { - return true; - } - final StorageSpaceTO storageSpace = (StorageSpaceTO) o; - if (!spaceToken.equals(storageSpace.getSpaceToken())) { - return false; - } - if (!spaceFile.equals(storageSpace.getSpaceFile())) { - return false; - } - return true; - } else { - return false; - } - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + spaceToken.hashCode(); - return hash; - - } - - @Override - public String toString() { - - 
StringBuilder sb = new StringBuilder(); - sb.append(" ==== STORAGE SPACE (token=" + spaceToken + ") ==== \n"); - sb.append(" STORAGE SPACE ID = " + storageSpaceId); - sb.append("\n"); - sb.append(" OWNER USER NAME = " + ownerName); - sb.append("\n"); - sb.append(" OWNER VO NAME = " + voName); - sb.append("\n"); - sb.append(" SPACE ALIAS NAME = " + alias); - sb.append("\n"); - sb.append(" SPACE TYPE = " + spaceType); - sb.append("\n"); - sb.append(" SPACE TOKEN = " + spaceToken); - sb.append("\n"); - sb.append(" SPACE FILE = " + spaceFile); - sb.append("\n"); - sb.append(" CREATED = " + created); - sb.append("\n"); - sb.append(" TOTAL SIZE = " + totalSize); - sb.append("\n"); - sb.append(" GUARANTEED SIZE = " + guaranteedSize); - sb.append("\n"); - sb.append(" FREE SIZE = " + freeSize); - sb.append("\n"); - sb.append(" USED SIZE = " + usedSize); - sb.append("\n"); - sb.append(" BUSY SIZE = " + busySize); - sb.append("\n"); - sb.append(" AVAILABLE = " + availableSize); - sb.append("\n"); - sb.append(" RESERVED = " + reservedSize); - sb.append("\n"); - sb.append(" UNAVAILABLE = " + unavailableSize); - sb.append("\n"); - sb.append(" LIFETIME (sec) = " + lifetime); - sb.append("\n"); - sb.append(" STORAGE INFO = " + storageInfo); - sb.append("\n"); - sb.append(" UPDATE TIME = " + updateTime); - sb.append("\n"); - sb.append(" NR STOR_FILES = "); - sb.append("\n"); - return sb.toString(); - } - - @Override - public int compareTo(StorageSpaceTO o) { - - if (o instanceof StorageSpaceTO) { - return getCreated().compareTo(((StorageSpaceTO) o).getCreated()); - } - return 0; - } - - // ********************** Business Methods ********************** // + /** + * @param unavailableSize the unavailableSize to set + */ + public final void setUnavailableSize(long unavailableSize) { + + this.unavailableSize = unavailableSize; + } + + /** + * @return the reservedSize + */ + public final long getReservedSize() { + + return reservedSize; + } + + /** + * @param reservedSize the 
reservedSize to set + */ + public final void setReservedSize(long reservedSize) { + + this.reservedSize = reservedSize; + } + + /** + * @param availableSize the availableSize to set + */ + public void setAvailableSize(long availableSize) { + + this.availableSize = availableSize; + } + + /** + * @return the availableSize + */ + public long getAvailableSize() { + + return availableSize; + } + + // ********************** Common Methods ********************** // + + /** + * @param updateTime the updateTime to set + */ + public void setUpdateTime(Date updateTime) { + + this.updateTime = updateTime; + } + + /** + * @return the updateTime + */ + public Date getUpdateTime() { + + return updateTime; + } + + @Override + public boolean equals(Object o) { + + if (o == null) { + return false; + } + if (o instanceof StorageSpaceTO) { + if (this == o) { + return true; + } + final StorageSpaceTO storageSpace = (StorageSpaceTO) o; + if (!spaceToken.equals(storageSpace.getSpaceToken())) { + return false; + } + if (!spaceFile.equals(storageSpace.getSpaceFile())) { + return false; + } + return true; + } else { + return false; + } + } + + @Override + public int hashCode() { + + int hash = 17; + hash = 37 * hash + spaceToken.hashCode(); + return hash; + + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(" ==== STORAGE SPACE (token=" + spaceToken + ") ==== \n"); + sb.append(" STORAGE SPACE ID = " + storageSpaceId); + sb.append("\n"); + sb.append(" OWNER USER NAME = " + ownerName); + sb.append("\n"); + sb.append(" OWNER VO NAME = " + voName); + sb.append("\n"); + sb.append(" SPACE ALIAS NAME = " + alias); + sb.append("\n"); + sb.append(" SPACE TYPE = " + spaceType); + sb.append("\n"); + sb.append(" SPACE TOKEN = " + spaceToken); + sb.append("\n"); + sb.append(" SPACE FILE = " + spaceFile); + sb.append("\n"); + sb.append(" CREATED = " + created); + sb.append("\n"); + sb.append(" TOTAL SIZE = " + totalSize); + sb.append("\n"); + 
sb.append(" GUARANTEED SIZE = " + guaranteedSize); + sb.append("\n"); + sb.append(" FREE SIZE = " + freeSize); + sb.append("\n"); + sb.append(" USED SIZE = " + usedSize); + sb.append("\n"); + sb.append(" BUSY SIZE = " + busySize); + sb.append("\n"); + sb.append(" AVAILABLE = " + availableSize); + sb.append("\n"); + sb.append(" RESERVED = " + reservedSize); + sb.append("\n"); + sb.append(" UNAVAILABLE = " + unavailableSize); + sb.append("\n"); + sb.append(" LIFETIME (sec) = " + lifetime); + sb.append("\n"); + sb.append(" STORAGE INFO = " + storageInfo); + sb.append("\n"); + sb.append(" UPDATE TIME = " + updateTime); + sb.append("\n"); + sb.append(" NR STOR_FILES = "); + sb.append("\n"); + return sb.toString(); + } + + @Override + public int compareTo(StorageSpaceTO o) { + + if (o instanceof StorageSpaceTO) { + return getCreated().compareTo(((StorageSpaceTO) o).getCreated()); + } + return 0; + } + + // ********************** Business Methods ********************** // } diff --git a/src/main/java/it/grid/storm/catalogs/SurlMultyOperationRequestData.java b/src/main/java/it/grid/storm/persistence/model/SurlMultyOperationRequestData.java similarity index 96% rename from src/main/java/it/grid/storm/catalogs/SurlMultyOperationRequestData.java rename to src/main/java/it/grid/storm/persistence/model/SurlMultyOperationRequestData.java index cc28d7818..8bcbd499f 100644 --- a/src/main/java/it/grid/storm/catalogs/SurlMultyOperationRequestData.java +++ b/src/main/java/it/grid/storm/persistence/model/SurlMultyOperationRequestData.java @@ -1,6 +1,7 @@ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; @@ -46,8 +47,7 @@ public synchronized void store() { stored = true; } - private static Map 
buildSurlStatusMap(TSURL surl, - TReturnStatus status) { + private static Map buildSurlStatusMap(TSURL surl, TReturnStatus status) { if (surl == null || status == null) { throw new IllegalArgumentException( diff --git a/src/main/java/it/grid/storm/persistence/model/SurlRequestData.java b/src/main/java/it/grid/storm/persistence/model/SurlRequestData.java new file mode 100644 index 000000000..735ff0450 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/SurlRequestData.java @@ -0,0 +1,264 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import java.util.Map; + +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TStatusCode; + +/** + * @author Michele Dibenedetto + * + */ +public abstract class SurlRequestData implements RequestData { + + protected TSURL SURL; + protected TReturnStatus status; + + public SurlRequestData(TSURL toSURL, TReturnStatus status) + throws InvalidSurlRequestDataAttributesException { + + if (toSURL == null || status == null || status.getStatusCode() == null) { + throw new InvalidSurlRequestDataAttributesException(toSURL, status); + } + this.SURL = toSURL; + this.status = status; + } + + /** + * Method that returns the TURL for this chunk of the srm request. + */ + @Override + public final TSURL getSURL() { + + return SURL; + } + + /** + * Method that returns the status for this chunk of the srm request. + */ + @Override + public final TReturnStatus getStatus() { + + return status; + } + + /** + * Method used to set the Status associated to this chunk. If status is null, then nothing gets + * set! + */ + public void setStatus(TReturnStatus status) { + + if (status != null) { + this.status = status; + } + } + + protected void setStatus(TStatusCode statusCode, String explanation) { + + if (explanation == null) { + status = new TReturnStatus(statusCode); + } else { + status = new TReturnStatus(statusCode, explanation); + } + } + + /** + * Method that sets the status of this request to SRM_REQUEST_QUEUED; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. 
+ */ + @Override + public final void changeStatusSRM_REQUEST_QUEUED(String explanation) { + + setStatus(TStatusCode.SRM_REQUEST_QUEUED, explanation); + } + + /** + * Method that sets the status of this request to SRM_REQUEST_INPROGRESS; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. + */ + @Override + public final void changeStatusSRM_REQUEST_INPROGRESS(String explanation) { + + setStatus(TStatusCode.SRM_REQUEST_INPROGRESS, explanation); + } + + /** + * Method that sets the status of this request to SRM_SUCCESS; it needs the explanation String + * which describes the situation in greater detail; if a null is passed, then an empty String is + * used as explanation. + */ + @Override + public final void changeStatusSRM_SUCCESS(String explanation) { + + setStatus(TStatusCode.SRM_SUCCESS, explanation); + } + + /** + * Method that sets the status of this request to SRM_INTERNAL_ERROR; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. + */ + @Override + public final void changeStatusSRM_INTERNAL_ERROR(String explanation) { + + setStatus(TStatusCode.SRM_INTERNAL_ERROR, explanation); + } + + /** + * Method that sets the status of this request to SRM_INVALID_REQUEST; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. + */ + @Override + public final void changeStatusSRM_INVALID_REQUEST(String explanation) { + + setStatus(TStatusCode.SRM_INVALID_REQUEST, explanation); + } + + /** + * Method that sets the status of this request to SRM_AUTHORIZATION_FAILURE; it needs the + * explanation String which describes the situation in greater detail; if a null is passed, then + * an empty String is used as explanation. 
+ */ + @Override + public final void changeStatusSRM_AUTHORIZATION_FAILURE(String explanation) { + + setStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, explanation); + } + + /** + * Method that sets the status of this request to SRM_ABORTED; it needs the explanation String + * which describes the situation in greater detail; if a null is passed, then an empty String is + * used as explanation. + */ + @Override + public final void changeStatusSRM_ABORTED(String explanation) { + + setStatus(TStatusCode.SRM_ABORTED, explanation); + } + + @Override + public final void changeStatusSRM_FILE_BUSY(String explanation) { + + setStatus(TStatusCode.SRM_FILE_BUSY, explanation); + } + + @Override + public final void changeStatusSRM_INVALID_PATH(String explanation) { + + setStatus(TStatusCode.SRM_INVALID_PATH, explanation); + } + + @Override + public final void changeStatusSRM_NOT_SUPPORTED(String explanation) { + + setStatus(TStatusCode.SRM_NOT_SUPPORTED, explanation); + } + + @Override + public final void changeStatusSRM_FAILURE(String explanation) { + + setStatus(TStatusCode.SRM_FAILURE, explanation); + } + + @Override + public final void changeStatusSRM_SPACE_LIFETIME_EXPIRED(String explanation) { + + setStatus(TStatusCode.SRM_SPACE_LIFETIME_EXPIRED, explanation); + } + + @Override + public String display(Map map) { + + // nonsense method + return ""; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = 1; + result = prime * result + ((SURL == null) ? 0 : SURL.hashCode()); + result = prime * result + ((status == null) ? 
0 : status.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + SurlRequestData other = (SurlRequestData) obj; + if (SURL == null) { + if (other.SURL != null) { + return false; + } + } else if (!SURL.equals(other.SURL)) { + return false; + } + if (status == null) { + if (other.status != null) { + return false; + } + } else if (!status.equals(other.status)) { + return false; + } + return true; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("SurlRequestData [SURL="); + builder.append(SURL); + builder.append(", status="); + builder.append(status); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/SynchMultyOperationRequestData.java b/src/main/java/it/grid/storm/persistence/model/SynchMultyOperationRequestData.java similarity index 51% rename from src/main/java/it/grid/storm/catalogs/SynchMultyOperationRequestData.java rename to src/main/java/it/grid/storm/persistence/model/SynchMultyOperationRequestData.java index c12c524f0..01581e03c 100644 --- a/src/main/java/it/grid/storm/catalogs/SynchMultyOperationRequestData.java +++ b/src/main/java/it/grid/storm/persistence/model/SynchMultyOperationRequestData.java @@ -1,10 +1,10 @@ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TRequestToken; public interface SynchMultyOperationRequestData extends RequestData { - public TRequestToken getGeneratedRequestToken(); + public TRequestToken getGeneratedRequestToken(); - public void store(); + public void store(); } diff --git 
a/src/main/java/it/grid/storm/persistence/model/TapeRecallTO.java b/src/main/java/it/grid/storm/persistence/model/TapeRecallTO.java index edb1a5fe8..af6943a21 100644 --- a/src/main/java/it/grid/storm/persistence/model/TapeRecallTO.java +++ b/src/main/java/it/grid/storm/persistence/model/TapeRecallTO.java @@ -21,15 +21,11 @@ import java.util.Calendar; import java.util.Date; import java.util.GregorianCalendar; -import java.util.Random; import java.util.UUID; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static it.grid.storm.persistence.model.TapeRecallTO.RecallTaskType.BOL; -import static it.grid.storm.persistence.model.TapeRecallTO.RecallTaskType.PTG; - import com.fasterxml.jackson.annotation.JsonIgnore; import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; @@ -38,423 +34,398 @@ public class TapeRecallTO implements Serializable, Comparable { - public enum RecallTaskType { - - PTG, BOL, BACK, RCLL; - } - - private static final Logger log = LoggerFactory.getLogger(TapeRecallTO.class); + public enum RecallTaskType { - private static final long serialVersionUID = -2907739786996767167L; + PTG, BOL, BACK, RCLL; + } - public static final String START_CHAR = ""; - public static final char SEPARATOR_CHAR = '\u0009'; - public static final String DATE_FORMAT = "dd-MM-yyyy HH.mm.ss"; + private static final Logger log = LoggerFactory.getLogger(TapeRecallTO.class); - private UUID taskId = null; - private TRequestToken requestToken = null; - private RecallTaskType requestType = null; - private String fileName = null; - private String userID = null; - private String voName = null; - private int pinLifetime = 0; - private TapeRecallStatus status = TapeRecallStatus.QUEUED; - private int retryAttempt = 0; - private Date insertionInstant = null; - private Date inProgressInstant = null; - private Date finalStateInstant = null; - private Date deferredRecallInstant = null; - private UUID groupTaskId = null; + private static final long serialVersionUID 
= -2907739786996767167L; - private final Calendar endOfTheWorld = new GregorianCalendar(2012, Calendar.DECEMBER, 21); + public static final String START_CHAR = ""; + public static final char SEPARATOR_CHAR = '\u0009'; + public static final String DATE_FORMAT = "dd-MM-yyyy HH.mm.ss"; - public static TapeRecallTO createRandom(Date date, String voName) { + private UUID taskId = null; + private TRequestToken requestToken = null; + private RecallTaskType requestType = null; + private String fileName = null; + private String userID = null; + private String voName = null; + private int pinLifetime = 0; + private TapeRecallStatus status = TapeRecallStatus.QUEUED; + private int retryAttempt = 0; + private Date insertionInstant = null; + private Date inProgressInstant = null; + private Date finalStateInstant = null; + private Date deferredRecallInstant = null; + private UUID groupTaskId = null; - TapeRecallTO result = new TapeRecallTO(); - Random r = new Random(); - result.setFileName("/root/" + voName + "/test/" + r.nextInt(1001)); - result.setRequestToken(TRequestToken.getRandom()); - if (r.nextInt(2) == 0) { - result.setRequestType(BOL); - } else { - result.setRequestType(PTG); - } - result.setUserID("FakeId"); - result.setRetryAttempt(0); - result.setPinLifetime(r.nextInt(1001)); - result.setVoName(voName); - result.setInsertionInstant(date); - int deferred = r.nextInt(2); - Date deferredRecallTime = new Date(date.getTime() + (deferred * (long) Math.random())); - result.setDeferredRecallInstant(deferredRecallTime); - result.setGroupTaskId(UUID.randomUUID()); - return result; - } + private final Calendar endOfTheWorld = new GregorianCalendar(2012, Calendar.DECEMBER, 21); - /* - * Implementing the natural order (by age) - */ - public int compareTo(TapeRecallTO arg0) { + /* + * Implementing the natural order (by age) + */ + public int compareTo(TapeRecallTO arg0) { - if (arg0 == null) { - return 0; - } - return insertionInstant.compareTo(arg0.getInsertionInstant()); - } + 
if (arg0 == null) { + return 0; + } + return insertionInstant.compareTo(arg0.getInsertionInstant()); + } - public Date getDeferredRecallInstant() { + public Date getDeferredRecallInstant() { - return deferredRecallInstant; - } + return deferredRecallInstant; + } - public String getFileName() { + public String getFileName() { - return fileName; - } + return fileName; + } - public Date getInsertionInstant() { + public Date getInsertionInstant() { - return insertionInstant; - } + return insertionInstant; + } - public Date getInProgressInstant() { + public Date getInProgressInstant() { - return inProgressInstant; - } + return inProgressInstant; + } - public Date getFinalStateInstant() { + public Date getFinalStateInstant() { - return finalStateInstant; - } + return finalStateInstant; + } - public int getPinLifetime() { + public int getPinLifetime() { - return pinLifetime; - } + return pinLifetime; + } - public TapeRecallStatus getStatus() { + public TapeRecallStatus getStatus() { - return status; - } + return status; + } - /** - * RequestToken is the primary key of the table - * - * @return - */ - public TRequestToken getRequestToken() { + /** + * RequestToken is the primary key of the table + * + * @return + */ + public TRequestToken getRequestToken() { - return requestToken; - } + return requestToken; + } - public RecallTaskType getRequestType() { + public RecallTaskType getRequestType() { - return requestType; - } + return requestType; + } - public int getRetryAttempt() { + public int getRetryAttempt() { - return retryAttempt; - } + return retryAttempt; + } - @JsonIgnore - public int getStatusId() { + @JsonIgnore + public int getStatusId() { - return status.getStatusId(); - } + return status.getStatusId(); + } - public UUID getTaskId() { + public UUID getTaskId() { - buildTaskId(); - return taskId; - } + buildTaskId(); + return taskId; + } - public UUID getGroupTaskId() { + public UUID getGroupTaskId() { - return groupTaskId; - } + return groupTaskId; + } - public 
String getUserID() { + public String getUserID() { - return userID; - } + return userID; + } - public String getVoName() { + public String getVoName() { - return voName; - } + return voName; + } - public void setDeferredRecallInstant(Date date) { + public void setDeferredRecallInstant(Date date) { - deferredRecallInstant = date; - } + deferredRecallInstant = date; + } - public void setFileName(String fileName) { + public void setFileName(String fileName) { - this.fileName = fileName; - buildTaskId(); - } + this.fileName = fileName; + buildTaskId(); + } - public void setInsertionInstant(Date date) { + public void setInsertionInstant(Date date) { - insertionInstant = date; - } + insertionInstant = date; + } - private void setInProgressInstant(Date date) { + private void setInProgressInstant(Date date) { - inProgressInstant = date; - } + inProgressInstant = date; + } - private void setFinalStateInstant(Date date) { + private void setFinalStateInstant(Date date) { - finalStateInstant = date; - } + finalStateInstant = date; + } - public void setPinLifetime(int pinLifetime) { + public void setPinLifetime(int pinLifetime) { - this.pinLifetime = pinLifetime; - } + this.pinLifetime = pinLifetime; + } - /** - * - * @param requestToken - */ - public void setRequestToken(TRequestToken requestToken) { + /** + * + * @param requestToken + */ + public void setRequestToken(TRequestToken requestToken) { - this.requestToken = requestToken; - } + this.requestToken = requestToken; + } - public void setRequestType(RecallTaskType requestType) { + public void setRequestType(RecallTaskType requestType) { - this.requestType = requestType; - } + this.requestType = requestType; + } - public void setRetryAttempt(int retryAttempt) { + public void setRetryAttempt(int retryAttempt) { - this.retryAttempt = retryAttempt; - } + this.retryAttempt = retryAttempt; + } - /** - * Sets the status of the recall task and if a transition is performed records the appropriate - * time-stamp - * - * @param 
status - */ - public void setStatus(TapeRecallStatus status) { + /** + * Sets the status of the recall task and if a transition is performed records the appropriate + * time-stamp + * + * @param status + */ + public void setStatus(TapeRecallStatus status) { - this.status = status; - if (this.status.equals(TapeRecallStatus.IN_PROGRESS) && this.inProgressInstant == null) { - this.setInProgressInstant(new Date()); - } else { - if (TapeRecallStatus.isFinalStatus(this.status.getStatusId()) - && this.inProgressInstant == null) { - this.setFinalStateInstant(new Date()); - } - } - } + this.status = status; + if (this.status.equals(TapeRecallStatus.IN_PROGRESS) && this.inProgressInstant == null) { + this.setInProgressInstant(new Date()); + } else { + if (TapeRecallStatus.isFinalStatus(this.status.getStatusId()) + && this.inProgressInstant == null) { + this.setFinalStateInstant(new Date()); + } + } + } - /** - * @param statusId - */ - public void setStatusId(int statusId) { + /** + * @param statusId + */ + public void setStatusId(int statusId) { - this.setStatus(TapeRecallStatus.getRecallTaskStatus(statusId)); - } + this.setStatus(TapeRecallStatus.getRecallTaskStatus(statusId)); + } - public void setTaskId(UUID taskId) { + public void setTaskId(UUID taskId) { - this.taskId = taskId; - } + this.taskId = taskId; + } - public void setGroupTaskId(UUID groupTaskId) { + public void setGroupTaskId(UUID groupTaskId) { - this.groupTaskId = groupTaskId; - } + this.groupTaskId = groupTaskId; + } - public void setUserID(String userID) { - - this.userID = userID; - } - - public void setVoName(String voName) { - - this.voName = voName; - } - - /** - * Does not print the taskId but the group task Id Does not print the state transition time - * stamps - * - * @return - */ - public String toGEMSS() { - - StringBuilder sb = new StringBuilder(); - - sb.append(START_CHAR); - sb.append(groupTaskId); - sb.append(SEPARATOR_CHAR); - - Format formatter = new SimpleDateFormat(DATE_FORMAT); - if 
(insertionInstant != null) { - sb.append(formatter.format(insertionInstant)); - } else { - insertionInstant = endOfTheWorld.getTime(); - sb.append(formatter.format(insertionInstant)); - } - - sb.append(SEPARATOR_CHAR); - sb.append(requestType); - sb.append(SEPARATOR_CHAR); - sb.append(fileName); - sb.append(SEPARATOR_CHAR); - sb.append(voName); - sb.append(SEPARATOR_CHAR); - sb.append(userID); - sb.append(SEPARATOR_CHAR); - sb.append(retryAttempt); - sb.append(SEPARATOR_CHAR); - sb.append(status); - sb.append(SEPARATOR_CHAR); - - if (deferredRecallInstant != null) { - sb.append(formatter.format(deferredRecallInstant)); - } else { - sb.append(formatter.format(insertionInstant)); - } - - sb.append(SEPARATOR_CHAR); - sb.append(pinLifetime); - sb.append(SEPARATOR_CHAR); - sb.append(requestToken); - sb.append(SEPARATOR_CHAR); - - if (inProgressInstant != null) - sb.append(formatter.format(inProgressInstant)); - else - sb.append("null"); - - return sb.toString(); - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - - sb.append(START_CHAR); - sb.append(taskId); - sb.append(SEPARATOR_CHAR); - - Format formatter = new SimpleDateFormat(DATE_FORMAT); - if (insertionInstant != null) { - sb.append(formatter.format(insertionInstant)); - } else { - insertionInstant = endOfTheWorld.getTime(); - sb.append(formatter.format(insertionInstant)); - } - - sb.append(SEPARATOR_CHAR); - sb.append(requestType); - sb.append(SEPARATOR_CHAR); - sb.append(fileName); - sb.append(SEPARATOR_CHAR); - sb.append(voName); - sb.append(SEPARATOR_CHAR); - sb.append(userID); - sb.append(SEPARATOR_CHAR); - sb.append(retryAttempt); - sb.append(SEPARATOR_CHAR); - sb.append(status); - sb.append(SEPARATOR_CHAR); - - if (inProgressInstant != null) { - sb.append(formatter.format(inProgressInstant)); - } else { - sb.append("null"); - } - sb.append(SEPARATOR_CHAR); - - if (finalStateInstant != null) { - sb.append(formatter.format(finalStateInstant)); - } else { - 
sb.append("null"); - } - sb.append(SEPARATOR_CHAR); - - if (deferredRecallInstant != null) { - sb.append(formatter.format(deferredRecallInstant)); - } else { - sb.append(formatter.format(insertionInstant)); - } - - sb.append(SEPARATOR_CHAR); - sb.append(pinLifetime); - sb.append(SEPARATOR_CHAR); - sb.append(requestToken); - sb.append(SEPARATOR_CHAR); - sb.append(groupTaskId); - return sb.toString(); - } - - /** - * This method generate a TaskId from fileName - * - * @return - */ - private void buildTaskId() { - - if (this.fileName != null) { - this.taskId = buildTaskIdFromFileName(this.fileName); - } else { - log.error("Unable to create taskId because filename is NULL"); - } - } - - public static UUID buildTaskIdFromFileName(String fileName) { - - return UUID.nameUUIDFromBytes(fileName.getBytes()); - } - - /** - * Intended to be used when building this object from a database row NOTE: before to call this - * method, call the set status method - * - * @param inProgressInstant - * @param finalStateInstant - */ - public void forceStatusUpdateInstants(Date inProgressInstant, Date finalStateInstant) { - - if (inProgressInstant != null) { - if (this.status.equals(TapeRecallStatus.IN_PROGRESS) - || TapeRecallStatus.isFinalStatus(this.status.getStatusId())) { - this.inProgressInstant = inProgressInstant; - } else { - log.error("Unable to force the in progress transition time-stamp. " - + "Invalid status: {}", status); - } - } - if (finalStateInstant != null) { - if (TapeRecallStatus.isFinalStatus(this.status.getStatusId())) { - this.finalStateInstant = finalStateInstant; - } else { - log.error("Unable to force the in final status transition time-stamp. 
" - + "current status {} is not finale", status); - } - } - } - - public void setFakeRequestToken() { - - final String FAKE_PREFIX = "FAKE-"; - try { - this.setRequestToken(new TRequestToken( - FAKE_PREFIX - .concat(UUID.randomUUID().toString().substring(FAKE_PREFIX.length())), - Calendar.getInstance().getTime())); - } catch (InvalidTRequestTokenAttributesException e) { - log.error(e.getMessage(), e); - } - } + public void setUserID(String userID) { + + this.userID = userID; + } + + public void setVoName(String voName) { + + this.voName = voName; + } + + /** + * Does not print the taskId but the group task Id Does not print the state transition time stamps + * + * @return + */ + public String toGEMSS() { + + StringBuilder sb = new StringBuilder(); + + sb.append(START_CHAR); + sb.append(groupTaskId); + sb.append(SEPARATOR_CHAR); + + Format formatter = new SimpleDateFormat(DATE_FORMAT); + if (insertionInstant != null) { + sb.append(formatter.format(insertionInstant)); + } else { + insertionInstant = endOfTheWorld.getTime(); + sb.append(formatter.format(insertionInstant)); + } + + sb.append(SEPARATOR_CHAR); + sb.append(requestType); + sb.append(SEPARATOR_CHAR); + sb.append(fileName); + sb.append(SEPARATOR_CHAR); + sb.append(voName); + sb.append(SEPARATOR_CHAR); + sb.append(userID); + sb.append(SEPARATOR_CHAR); + sb.append(retryAttempt); + sb.append(SEPARATOR_CHAR); + sb.append(status); + sb.append(SEPARATOR_CHAR); + + if (deferredRecallInstant != null) { + sb.append(formatter.format(deferredRecallInstant)); + } else { + sb.append(formatter.format(insertionInstant)); + } + + sb.append(SEPARATOR_CHAR); + sb.append(pinLifetime); + sb.append(SEPARATOR_CHAR); + sb.append(requestToken); + sb.append(SEPARATOR_CHAR); + + if (inProgressInstant != null) + sb.append(formatter.format(inProgressInstant)); + else + sb.append("null"); + + return sb.toString(); + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + + sb.append(START_CHAR); + 
sb.append(taskId); + sb.append(SEPARATOR_CHAR); + + Format formatter = new SimpleDateFormat(DATE_FORMAT); + if (insertionInstant != null) { + sb.append(formatter.format(insertionInstant)); + } else { + insertionInstant = endOfTheWorld.getTime(); + sb.append(formatter.format(insertionInstant)); + } + + sb.append(SEPARATOR_CHAR); + sb.append(requestType); + sb.append(SEPARATOR_CHAR); + sb.append(fileName); + sb.append(SEPARATOR_CHAR); + sb.append(voName); + sb.append(SEPARATOR_CHAR); + sb.append(userID); + sb.append(SEPARATOR_CHAR); + sb.append(retryAttempt); + sb.append(SEPARATOR_CHAR); + sb.append(status); + sb.append(SEPARATOR_CHAR); + + if (inProgressInstant != null) { + sb.append(formatter.format(inProgressInstant)); + } else { + sb.append("null"); + } + sb.append(SEPARATOR_CHAR); + + if (finalStateInstant != null) { + sb.append(formatter.format(finalStateInstant)); + } else { + sb.append("null"); + } + sb.append(SEPARATOR_CHAR); + + if (deferredRecallInstant != null) { + sb.append(formatter.format(deferredRecallInstant)); + } else { + sb.append(formatter.format(insertionInstant)); + } + + sb.append(SEPARATOR_CHAR); + sb.append(pinLifetime); + sb.append(SEPARATOR_CHAR); + sb.append(requestToken); + sb.append(SEPARATOR_CHAR); + sb.append(groupTaskId); + return sb.toString(); + } + + /** + * This method generate a TaskId from fileName + * + * @return + */ + private void buildTaskId() { + + if (this.fileName != null) { + this.taskId = buildTaskIdFromFileName(this.fileName); + } else { + log.error("Unable to create taskId because filename is NULL"); + } + } + + public static UUID buildTaskIdFromFileName(String fileName) { + + return UUID.nameUUIDFromBytes(fileName.getBytes()); + } + + /** + * Intended to be used when building this object from a database row NOTE: before to call this + * method, call the set status method + * + * @param inProgressInstant + * @param finalStateInstant + */ + public void forceStatusUpdateInstants(Date inProgressInstant, Date 
finalStateInstant) { + + if (inProgressInstant != null) { + if (this.status.equals(TapeRecallStatus.IN_PROGRESS) + || TapeRecallStatus.isFinalStatus(this.status.getStatusId())) { + this.inProgressInstant = inProgressInstant; + } else { + log.error("Unable to force the in progress transition time-stamp. " + "Invalid status: {}", + status); + } + } + if (finalStateInstant != null) { + if (TapeRecallStatus.isFinalStatus(this.status.getStatusId())) { + this.finalStateInstant = finalStateInstant; + } else { + log.error("Unable to force the in final status transition time-stamp. " + + "current status {} is not finale", status); + } + } + } + + public void setFakeRequestToken() { + + final String FAKE_PREFIX = "FAKE-"; + try { + this.setRequestToken(new TRequestToken( + FAKE_PREFIX.concat(UUID.randomUUID().toString().substring(FAKE_PREFIX.length())), + Calendar.getInstance().getTime())); + } catch (InvalidTRequestTokenAttributesException e) { + log.error(e.getMessage(), e); + } + } } diff --git a/src/main/java/it/grid/storm/persistence/util/db/SQLBuilder.java b/src/main/java/it/grid/storm/persistence/pool/DatabaseConnectionPool.java similarity index 69% rename from src/main/java/it/grid/storm/persistence/util/db/SQLBuilder.java rename to src/main/java/it/grid/storm/persistence/pool/DatabaseConnectionPool.java index cede0ea1d..49fba8584 100644 --- a/src/main/java/it/grid/storm/persistence/util/db/SQLBuilder.java +++ b/src/main/java/it/grid/storm/persistence/pool/DatabaseConnectionPool.java @@ -15,21 +15,20 @@ * the License. 
*/ -package it.grid.storm.persistence.util.db; +package it.grid.storm.persistence.pool; -public abstract class SQLBuilder { +public interface DatabaseConnectionPool { - public SQLBuilder() { + public int getMaxTotal(); - super(); - } + public int getInitialSize(); - public abstract String getCommand(); + public int getMinIdle(); - public abstract String getTable(); + public long getMaxConnLifetimeMillis(); - public abstract String getWhat(); + public boolean getTestOnBorrow(); - public abstract String getCriteria(); + public boolean getTestWhileIdle(); } diff --git a/src/main/java/it/grid/storm/common/exception/StoRMException.java b/src/main/java/it/grid/storm/persistence/pool/DatabaseConnector.java similarity index 73% rename from src/main/java/it/grid/storm/common/exception/StoRMException.java rename to src/main/java/it/grid/storm/persistence/pool/DatabaseConnector.java index 96429fcae..3cffc61af 100644 --- a/src/main/java/it/grid/storm/common/exception/StoRMException.java +++ b/src/main/java/it/grid/storm/persistence/pool/DatabaseConnector.java @@ -15,12 +15,18 @@ * the License. */ -package it.grid.storm.common.exception; +package it.grid.storm.persistence.pool; -public class StoRMException extends Exception { +public interface DatabaseConnector { - public StoRMException() { + public String getDbName(); - } + public String getDriverName(); + + public String getDbUsername(); + + public String getDbPassword(); + + public String getDbURL(); } diff --git a/src/main/java/it/grid/storm/persistence/pool/DefaultDatabaseConnectionPool.java b/src/main/java/it/grid/storm/persistence/pool/DefaultDatabaseConnectionPool.java new file mode 100644 index 000000000..cf02d1b02 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/pool/DefaultDatabaseConnectionPool.java @@ -0,0 +1,115 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.pool; + +import java.sql.Connection; +import java.sql.SQLException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.metrics.InstrumentedBasicDataSource; +import it.grid.storm.metrics.StormMetricRegistry; + +public class DefaultDatabaseConnectionPool implements DatabaseConnectionPool { + + private static final Logger log = LoggerFactory.getLogger(DefaultDatabaseConnectionPool.class); + + private DatabaseConnector dbs; + + private int maxTotal; + private int minIdle; + private int maxConnLifetimeMillis; + private boolean isTestOnBorrow; + private boolean isTestWhileIdle; + + private InstrumentedBasicDataSource bds; + + public DefaultDatabaseConnectionPool(DatabaseConnector dbs, int maxTotal, int minIdle, + int maxConnLifetimeMillis, boolean isTestOnBorrow, boolean isTestWhileIdle) { + + this.dbs = dbs; + this.maxTotal = maxTotal; + this.minIdle = minIdle; + this.maxConnLifetimeMillis = maxConnLifetimeMillis; + this.isTestOnBorrow = isTestOnBorrow; + this.isTestWhileIdle = isTestWhileIdle; + + init(); + } + + private void init() { + + bds = new InstrumentedBasicDataSource(dbs.getDbName(), + StormMetricRegistry.METRIC_REGISTRY.getRegistry()); + + bds.setDriverClassName(dbs.getDriverName()); + bds.setUrl(dbs.getDbURL()); + bds.setUsername(dbs.getDbUsername()); + bds.setPassword(dbs.getDbPassword()); + bds.setMaxTotal(maxTotal); + 
bds.setInitialSize(minIdle); + bds.setMinIdle(minIdle); + bds.setMaxConnLifetimeMillis(maxConnLifetimeMillis); + bds.setTestOnBorrow(isTestOnBorrow); + bds.setTestWhileIdle(isTestWhileIdle); + + log.info("Connecting to database '{}' as user '{}'", dbs.getDbName(), dbs.getDbUsername()); + log.debug("Database URL: {}", dbs.getDbURL()); + log.debug( + "Pool settings: [max-total: {}, min-idle: {}, max-conn-lifetime-millis: {}, test-on-borrow: {}, test-while-idle: {}]", + maxTotal, minIdle, maxConnLifetimeMillis, isTestOnBorrow, isTestWhileIdle); + + } + + public Connection getConnection() throws SQLException { + + return bds.getConnection(); + } + + @Override + public int getMaxTotal() { + return maxTotal; + } + + @Override + public int getInitialSize() { + return minIdle; + } + + @Override + public int getMinIdle() { + return minIdle; + } + + @Override + public long getMaxConnLifetimeMillis() { + return maxConnLifetimeMillis; + } + + @Override + public boolean getTestOnBorrow() { + return isTestOnBorrow; + } + + @Override + public boolean getTestWhileIdle() { + return isTestWhileIdle; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/pool/DefaultMySqlDatabaseConnector.java b/src/main/java/it/grid/storm/persistence/pool/DefaultMySqlDatabaseConnector.java new file mode 100644 index 000000000..922956595 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/pool/DefaultMySqlDatabaseConnector.java @@ -0,0 +1,85 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.pool; + +import static java.lang.String.format; + +import it.grid.storm.config.Configuration; + +public class DefaultMySqlDatabaseConnector implements DatabaseConnector { + + private static final String MYSQL_DRIVER = "com.mysql.cj.jdbc.Driver"; + + private final String name; + private final String url; + private final String username; + private final String password; + + private DefaultMySqlDatabaseConnector(String database) { + + this.name = database; + + Configuration config = Configuration.getInstance(); + + this.username = config.getDbUsername(); + this.password = config.getDbPassword(); + + String hostname = config.getDbHostname(); + int port = config.getDbPort(); + String properties = config.getDbProperties(); + + if (properties.isEmpty()) { + this.url = format("jdbc:mysql://%s:%d/%s", hostname, port, database); + } else { + this.url = format("jdbc:mysql://%s:%d/%s?%s", hostname, port, database, properties); + } + } + + @Override + public String getDriverName() { + return MYSQL_DRIVER; + } + + @Override + public String getDbURL() { + return url; + } + + @Override + public String getDbUsername() { + return username; + } + + @Override + public String getDbPassword() { + return password; + } + + public static DatabaseConnector getStormDbDatabaseConnector() { + return new DefaultMySqlDatabaseConnector("storm_db"); + } + + public static DatabaseConnector getStormBeIsamDatabaseConnector() { + return new DefaultMySqlDatabaseConnector("storm_be_ISAM"); + } + + @Override + public String getDbName() { + return name; + } +} diff --git a/src/main/java/it/grid/storm/persistence/pool/StormBeIsamConnectionPool.java b/src/main/java/it/grid/storm/persistence/pool/StormBeIsamConnectionPool.java new file mode 100644 index 000000000..a11a688e4 --- /dev/null +++ 
b/src/main/java/it/grid/storm/persistence/pool/StormBeIsamConnectionPool.java @@ -0,0 +1,42 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.pool; + +import it.grid.storm.config.Configuration; + +public class StormBeIsamConnectionPool extends DefaultDatabaseConnectionPool { + + private static StormBeIsamConnectionPool instance; + + public static synchronized StormBeIsamConnectionPool getInstance() { + if (instance == null) { + instance = new StormBeIsamConnectionPool(); + } + return instance; + } + + private final static Configuration c = Configuration.getInstance(); + + private StormBeIsamConnectionPool() { + + super(DefaultMySqlDatabaseConnector.getStormBeIsamDatabaseConnector(), c.getDbPoolSize(), + c.getDbPoolMinIdle(), c.getDbPoolMaxWaitMillis(), c.isDbPoolTestOnBorrow(), + c.isDbPoolTestWhileIdle()); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/pool/StormDbConnectionPool.java b/src/main/java/it/grid/storm/persistence/pool/StormDbConnectionPool.java new file mode 100644 index 000000000..df7a3c048 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/pool/StormDbConnectionPool.java @@ -0,0 +1,41 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.pool; + +import it.grid.storm.config.Configuration; + +public class StormDbConnectionPool extends DefaultDatabaseConnectionPool { + + private static StormDbConnectionPool instance; + + public static synchronized StormDbConnectionPool getInstance() { + if (instance == null) { + instance = new StormDbConnectionPool(); + } + return instance; + } + + private final static Configuration c = Configuration.getInstance(); + + private StormDbConnectionPool() { + + super(DefaultMySqlDatabaseConnector.getStormDbDatabaseConnector(), c.getDbPoolSize(), + c.getDbPoolMinIdle(), c.getDbPoolMaxWaitMillis(), c.isDbPoolTestOnBorrow(), + c.isDbPoolTestWhileIdle()); + } +} diff --git a/src/main/java/it/grid/storm/persistence/util/db/DBConnection.java b/src/main/java/it/grid/storm/persistence/util/db/DBConnection.java deleted file mode 100644 index 7441759ee..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/DBConnection.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.util.db; - -import it.grid.storm.persistence.DataSourceConnectionFactory; -import it.grid.storm.persistence.exceptions.PersistenceException; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DBConnection implements DataSourceConnectionFactory - -{ - - private static final Logger log = LoggerFactory.getLogger(DBConnection.class); - private Connection connection = null; - private DataBaseStrategy db; - - public DBConnection(DataBaseStrategy db) throws PersistenceException { - - this.db = db; - - try { - Class.forName(db.getDriverName()).newInstance(); - } catch (Exception ex) { - log.error("Exception while getting JDBC driver: {}", ex.getMessage(), ex); - throw new PersistenceException("Driver loading problem", ex); - } - } - - private void handleSQLException(SQLException e) throws PersistenceException{ - - log.error("SQL Error: {}, SQLState: {}, VendorError: {}.", - e.getMessage(), - e.getSQLState(), - e.getErrorCode(), - e); - - throw new PersistenceException(e); - - } - - public Connection borrowConnection() throws PersistenceException { - - Connection result = null; - try { - result = getConnection(); - } catch (SQLException e) { - handleSQLException(e); - } - return result; - } - - public void giveBackConnection(Connection con) throws PersistenceException { - - if (connection != null) { - try { - shutdown(); - } catch (SQLException e) { - handleSQLException(e); - } - } else { - throw 
new PersistenceException("Closing NON-Existing connection"); - } - } - - private Connection getConnection() throws SQLException { - - if (connection == null) { - String url = db.getConnectionString(); - connection = DriverManager.getConnection(url, db.getDbUsr(), - db.getDbPwd()); - } - return connection; - } - - private void shutdown() throws SQLException { - - connection.close(); // if there are no other open connection - connection = null; - } - -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/DBConnectionPool.java b/src/main/java/it/grid/storm/persistence/util/db/DBConnectionPool.java deleted file mode 100644 index afffc8341..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/DBConnectionPool.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence.util.db; - -import it.grid.storm.persistence.DataSourceConnectionFactory; -import it.grid.storm.persistence.exceptions.PersistenceException; - -import java.sql.Connection; -import java.sql.SQLException; - -import org.apache.commons.dbcp2.cpdsadapter.DriverAdapterCPDS; -import org.apache.commons.dbcp2.datasources.SharedPoolDataSource; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DBConnectionPool implements DataSourceConnectionFactory { - - private static final Logger log = LoggerFactory - .getLogger(DBConnectionPool.class); - private DataBaseStrategy db; - private static SharedPoolDataSource sharedDatasource; - private static DBConnectionPool instance = new DBConnectionPool(); - private static long handle = -1; - - private DBConnectionPool() { - super(); - } - - public static DBConnectionPool getPoolInstance() { - if (handle == -1) { - return null; - } else { - return instance; - } - } - - public static void initPool(DataBaseStrategy db, int maxActive, int maxWait) - throws PersistenceException { - instance.init(db, maxActive, maxWait); - } - - - private void handleSQLException(SQLException e) throws PersistenceException{ - - log.error("SQL Error: {}, SQLState: {}, VendorError: {}.", - e.getMessage(), - e.getSQLState(), - e.getErrorCode(), - e); - - throw new PersistenceException(e); - - } - public Connection borrowConnection() throws PersistenceException { - - Connection result = null; - if (handle == -1) { - throw new PersistenceException("Connection Pool is not initialized!"); - } - try { - result = sharedDatasource.getConnection(); - } catch (SQLException e) { - handleSQLException(e); - } - return result; - } - - public void giveBackConnection(Connection con) throws PersistenceException { - - if (con != null) { - try { - shutdown(con); - } catch (SQLException e) { - handleSQLException(e); - } - } else { - throw new PersistenceException("Closing NON-Existing connection"); - } - } - - public 
String getPoolInfo() throws PersistenceException { - - String result = ""; - if (handle == -1) { - throw new PersistenceException("Connection Pool is not initialized!"); - } - if (sharedDatasource.getValidationQuery() != null) { - result += "Validation query = " + sharedDatasource.getValidationQuery() - + "\n"; - } - if (sharedDatasource.getDescription() != null) { - result += "Description = " + sharedDatasource.getDescription() + "\n"; - } - result += "Nr Connection Active = " + sharedDatasource.getNumActive() - + "\n"; - result += "Nr Connection Idle = " + sharedDatasource.getNumIdle() + "\n"; - result += "Nr Max Active Connection = " + sharedDatasource.getMaxTotal() - + "\n"; - - return result; - } - - private void init(DataBaseStrategy db, int maxActive, int maxWait) { - - instance.setDatabaseStrategy(db); - DriverAdapterCPDS connectionPoolDatasource = new DriverAdapterCPDS(); - try { - connectionPoolDatasource.setDriver(db.getDriverName()); - } catch (Exception ex) { - log.error("Exception while getting driver: {}", ex.getMessage(), ex); - } - - String connectionString = db.getConnectionString(); - connectionPoolDatasource.setUrl(connectionString); - log.debug("Database connection string: {}", connectionString); - connectionPoolDatasource.setUser(db.getDbUsr()); - connectionPoolDatasource.setPassword(db.getDbPwd()); - - sharedDatasource = new SharedPoolDataSource(); - sharedDatasource.setConnectionPoolDataSource(connectionPoolDatasource); - - sharedDatasource.setMaxTotal(maxActive); - sharedDatasource.setDefaultMaxWaitMillis(maxWait); - sharedDatasource.setValidationQuery("SELECT 1"); - sharedDatasource.setDefaultTestOnBorrow(true); - - handle = System.currentTimeMillis(); - } - - /** - * - * @throws SQLException - */ - private void shutdown(Connection conn) throws SQLException { - - conn.close(); - conn = null; - } - - public static void printInfo(DBConnectionPool pool) { - - try { - log.info("DATABASE POOL INFO: {}" , pool.getPoolInfo()); - } catch 
(PersistenceException ex2) { - log.error(ex2.getMessage(),ex2); - } - - } - - public DataBaseStrategy getDatabaseStrategy() { - - return db; - } - - private void setDatabaseStrategy(DataBaseStrategy db) { - - this.db = db; - } - -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/DataBaseStrategy.java b/src/main/java/it/grid/storm/persistence/util/db/DataBaseStrategy.java deleted file mode 100644 index 18fdf6b56..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/DataBaseStrategy.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence.util.db; - -public class DataBaseStrategy { - - private final String dbmsVendor; - private final String driverName; - private final String jdbcPrefix; - private String dbName; - private String dbPrefix; - private String dbHost; - private String dbUsr; - private String dbPwd; - private SQLFormat formatter; - private String properties; - - public DataBaseStrategy(String dbmsVendor, String driverName, String prefix, - SQLFormat formatter) { - - this.dbmsVendor = dbmsVendor; - this.driverName = driverName; - jdbcPrefix = prefix; - this.formatter = formatter; - this.properties = ""; - } - - - public String getDbmsVendor() { - return dbmsVendor; - } - - public String getDriverName() { - return driverName; - } - - public String getJdbcPrefix() { - - return jdbcPrefix; - } - - public void setDbUsr(String usrDb) { - - dbUsr = usrDb; - } - - public String getDbUsr() { - - return dbUsr; - } - - public void setDbPwd(String pwd) { - - dbPwd = pwd; - } - - public String getDbPwd() { - - return dbPwd; - } - - public void setDbName(String dbName) { - - this.dbName = dbName; - } - - public String getDbName() { - - return dbName; - } - - public void setDbPrefix(String dbName) { - - dbPrefix = dbName; - } - - public String getDbPrefix() { - - return dbPrefix; - } - - public void setDbHost(String host) { - - dbHost = host; - } - - public String getDbHost() { - - return dbHost; - } - - public String getConnectionString() { - - String connStr = jdbcPrefix + dbHost + "/" + dbName; - if (!properties.isEmpty()) { - connStr += "?" 
+ properties; - } - return connStr; - } - - public void setFormatter(SQLFormat formatter) { - - this.formatter = formatter; - } - - public SQLFormat getFormatter() { - - return formatter; - } - - public void setProperties(String encodedProperties) { - - this.properties = encodedProperties; - } - - @Override - public String toString() { - - return dbmsVendor; - } -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/Databases.java b/src/main/java/it/grid/storm/persistence/util/db/Databases.java deleted file mode 100644 index ec1109e26..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/Databases.java +++ /dev/null @@ -1,35 +0,0 @@ -package it.grid.storm.persistence.util.db; - -import java.util.Map; - -import com.google.common.collect.Maps; - -import it.grid.storm.config.Configuration; - -public class Databases { - - private static final Map DATABASES = Maps.newHashMap(); - - private static final String MYSQL_VENDOR = "mysql"; - private static final String MYSQL_DRIVER = "com.mysql.cj.jdbc.Driver"; - private static final String MYSQL_PREFIX = "jdbc:mysql://"; - private static final SQLFormat MYSQL_FORMATTER = new MySqlFormat(); - - private static final String DB_NAME = "storm_be_ISAM"; - - static { - Configuration config = Configuration.getInstance(); - DataBaseStrategy dbs = new DataBaseStrategy(MYSQL_VENDOR, MYSQL_DRIVER, MYSQL_PREFIX, MYSQL_FORMATTER); - dbs.setDbUsr(config.getDBUserName()); - dbs.setDbPwd(config.getDBPassword()); - dbs.setProperties(config.getDBProperties()); - dbs.setDbName(DB_NAME); - dbs.setDbHost(config.getDBHostname()); - DATABASES.put(MYSQL_VENDOR, dbs); - } - - public static DataBaseStrategy getDataBaseStrategy(String vendor) { - - return DATABASES.get(vendor); -} -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/InsertBuilder.java b/src/main/java/it/grid/storm/persistence/util/db/InsertBuilder.java deleted file mode 100644 index 3af7148dc..000000000 --- 
a/src/main/java/it/grid/storm/persistence/util/db/InsertBuilder.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.util.db; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; - -public class InsertBuilder extends SQLBuilder { - - private String table; - private Map columnsAndData = new HashMap(); - - public void setTable(String table) { - - this.table = table; - } - - public String getTable() { - - return table; - } - - public String getCommand() { - - return "INSERT INTO "; - } - - public String getCriteria() { - - return ""; - } - - public String getWhat() { - - StringBuilder columns = new StringBuilder(); - StringBuilder values = new StringBuilder(); - StringBuilder what = new StringBuilder(); - - String columnName = null; - Iterator iter = columnsAndData.keySet().iterator(); - while (iter.hasNext()) { - columnName = iter.next(); - columns.append(columnName); - values.append(columnsAndData.get(columnName)); - if (iter.hasNext()) { - columns.append(','); - values.append(','); - } - } - - what.append(" ("); - what.append(columns); - what.append(") VALUES ("); - what.append(values); - what.append(") "); - return what.toString(); - - } - - public void addColumnAndData(String columnName, Object value) { - - if (value != null) { - columnsAndData.put(columnName, value); - } - } 
-} diff --git a/src/main/java/it/grid/storm/persistence/util/db/MySqlFormat.java b/src/main/java/it/grid/storm/persistence/util/db/MySqlFormat.java deleted file mode 100644 index 10afc79ea..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/MySqlFormat.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.util.db; - -import java.text.SimpleDateFormat; - -public class MySqlFormat implements SQLFormat { - - private static final SimpleDateFormat dateFormat = new SimpleDateFormat( - "yyyy-MM-dd HH:mm:ss"); - - /** - * Create a string value of fields insertable into the query - * - * @param value - * Object - * @return String - */ - public String format(Object value) { - - if (value == null) { - return null; - } - Class clazz = value.getClass(); - if (Character.class.equals(clazz) || char.class.equals(clazz)) { - value = value.toString(); - } - if (value instanceof String) { - return value.toString(); - } - if (value instanceof java.util.Date) { - return dateFormat.format(value); - } - return value.toString(); - } - -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/SQLHelper.java b/src/main/java/it/grid/storm/persistence/util/db/SQLHelper.java deleted file mode 100644 index f246cce58..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/SQLHelper.java +++ /dev/null @@ 
-1,99 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.util.db; - -public abstract class SQLHelper { - - public String dbmsVendor; - private SQLFormat formatter; - - protected SQLHelper(String dbmsVendor) { - - this.dbmsVendor = dbmsVendor; - this.formatter = Databases.getDataBaseStrategy(dbmsVendor).getFormatter(); - } - - public String format(Object value) { - - return formatter.format(value); - } - - /** - * - * @param value - * boolean - * @return String - */ - public String format(boolean value) { - - String result = null; - Boolean boolValue = new Boolean(value); - result = formatter.format(boolValue); - return result; - } - - /** - * - * @param value - * int - * @return String - */ - public String format(int value) { - - String result = null; - Integer intValue = null; - try { - intValue = new Integer(value); - } catch (NumberFormatException nfe) { - nfe.printStackTrace(); - } - result = formatter.format(intValue); - return result; - } - - /** - * - * @param value - * long - * @return String - */ - public String format(long value) { - - String result = null; - Long longValue = null; - try { - longValue = new Long(value); - } catch (NumberFormatException nfe) { - nfe.printStackTrace(); - } - result = formatter.format(longValue); - return result; - } - - /** - * - * @param date - * Date - * @return String - */ - public 
String format(java.util.Date date) { - - return formatter.format(date); - } - -} diff --git a/src/main/java/it/grid/storm/asynch/InvalidPutDoneReplyAttributeException.java b/src/main/java/it/grid/storm/persistence/util/helper/MySqlFormat.java similarity index 55% rename from src/main/java/it/grid/storm/asynch/InvalidPutDoneReplyAttributeException.java rename to src/main/java/it/grid/storm/persistence/util/helper/MySqlFormat.java index 8b7a68c3f..7b43e604a 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidPutDoneReplyAttributeException.java +++ b/src/main/java/it/grid/storm/persistence/util/helper/MySqlFormat.java @@ -15,26 +15,29 @@ * the License. */ -package it.grid.storm.asynch; +package it.grid.storm.persistence.util.helper; -/** - * Class that represents an exception thrown when an SRMPutDoneReply cannot be created because the - * supplied TReturnStatus is null. - * - * @author EGRID - ICTP Trieste - * @version 1.0 - * @date August, 2006 - */ -public class InvalidPutDoneReplyAttributeException extends Exception { +import java.text.SimpleDateFormat; - /** - * - */ - private static final long serialVersionUID = 1L; +public class MySqlFormat implements SQLFormat { - @Override - public String toString() { + private static final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); - return "null supplied TReturnStatus"; + /** + * Create a string value of fields + * + * @param value Object + * @return String + */ + public String format(Object value) { + + if (value == null) { + return null; + } + if (value instanceof java.util.Date) { + return dateFormat.format(value); + } + return value.toString(); } + } diff --git a/src/main/java/it/grid/storm/persistence/util/db/SQLFormat.java b/src/main/java/it/grid/storm/persistence/util/helper/SQLFormat.java similarity index 93% rename from src/main/java/it/grid/storm/persistence/util/db/SQLFormat.java rename to src/main/java/it/grid/storm/persistence/util/helper/SQLFormat.java index 
c26dc972e..16121fd33 100644 --- a/src/main/java/it/grid/storm/persistence/util/db/SQLFormat.java +++ b/src/main/java/it/grid/storm/persistence/util/helper/SQLFormat.java @@ -15,7 +15,7 @@ * the License. */ -package it.grid.storm.persistence.util.db; +package it.grid.storm.persistence.util.helper; public interface SQLFormat { diff --git a/src/main/java/it/grid/storm/persistence/util/helper/StorageSpaceSQLHelper.java b/src/main/java/it/grid/storm/persistence/util/helper/StorageSpaceSQLHelper.java index d4b37aa5b..8bcc3659c 100644 --- a/src/main/java/it/grid/storm/persistence/util/helper/StorageSpaceSQLHelper.java +++ b/src/main/java/it/grid/storm/persistence/util/helper/StorageSpaceSQLHelper.java @@ -17,11 +17,6 @@ package it.grid.storm.persistence.util.helper; -import it.grid.storm.common.types.VO; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.persistence.model.StorageSpaceTO; -import it.grid.storm.persistence.util.db.SQLHelper; - import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -31,787 +26,758 @@ import java.util.LinkedList; import java.util.List; +import com.google.common.collect.Lists; + +import it.grid.storm.common.types.VO; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.SQLHelper; +import it.grid.storm.persistence.model.StorageSpaceTO; + public class StorageSpaceSQLHelper extends SQLHelper { - private final static String TABLE_NAME = "storage_space"; - private final static HashMap COLS = new HashMap(); - - private static final String[] COLUMN_NAMES = { "SS_ID", "USERDN", "VOGROUP", - "ALIAS", "SPACE_TOKEN", "CREATED", "TOTAL_SIZE", "GUAR_SIZE", "FREE_SIZE", - "SPACE_FILE", "STORAGE_INFO", "LIFETIME", "SPACE_TYPE", "USED_SIZE", - "BUSY_SIZE", "UNAVAILABLE_SIZE", "AVAILABLE_SIZE", "RESERVED_SIZE", - "UPDATE_TIME" }; - - static { - COLS.put("storageSpaceId", "SS_ID"); - COLS.put("ownerName", "USERDN"); - COLS.put("ownerVO", "VOGROUP"); - 
COLS.put("alias", "ALIAS"); - COLS.put("token", "SPACE_TOKEN"); - COLS.put("created", "CREATED"); - COLS.put("spaceFile", "SPACE_FILE"); - COLS.put("storaqeInfo", "STORAGE_INFO"); - COLS.put("lifeTime", "LIFETIME"); - COLS.put("spaceType", "SPACE_TYPE"); - COLS.put("total_size", "TOTAL_SIZE"); - COLS.put("guar_size", "GUAR_SIZE"); - COLS.put("free_size", "FREE_SIZE"); - COLS.put("used_size", "USED_SIZE"); - COLS.put("busy_size", "BUSY_SIZE"); - COLS.put("unavailable_size", "UNAVAILABLE_SIZE"); - COLS.put("available_size", "AVAILABLE_SIZE"); - COLS.put("reserved_size", "RESERVED_SIZE"); - COLS.put("update_time", "UPDATE_TIME"); - } - - /** - * CONSTRUCTOR - */ - public StorageSpaceSQLHelper(String dbmsVendor) { - - super(dbmsVendor); - } - - /** - * - * @return String[] - */ - public String[] getColumnNames() { - - return COLUMN_NAMES; - } - - /** - * INSERT NEW ROW into TABLE - * - * @param ssTO - * StorageSpaceTO - * @return String - * @throws SQLException - */ - - public PreparedStatement insertQuery(Connection conn, StorageSpaceTO ssTO) - throws SQLException { - - List values = new LinkedList(); - - StringBuilder fields = new StringBuilder("("); - StringBuilder placeholders = new StringBuilder("("); - - if (ssTO != null) { - if (ssTO.getOwnerName() != null) { - fields.append(COLS.get("ownerName") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getOwnerName())); - } - - fields.append(COLS.get("ownerVO") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getVoName())); - - if (ssTO.getAlias() != null) { - fields.append(COLS.get("alias") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getAlias())); - } - if (ssTO.getSpaceToken() != null) { - fields.append(COLS.get("token") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getSpaceToken())); - } - if (ssTO.getCreated() != null) { - fields.append(COLS.get("created") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getCreated())); - } - if 
(ssTO.getSpaceFile() != null) { - fields.append(COLS.get("spaceFile") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getSpaceFile())); - } - if (ssTO.getStorageInfo() != null) { - fields.append(COLS.get("storaqeInfo") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getStorageInfo())); - } - if (ssTO.getLifetime() != -1) { - fields.append(COLS.get("lifeTime") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getLifetime())); - } - if (ssTO.getSpaceType() != null) { - fields.append(COLS.get("spaceType") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getSpaceType())); - } - if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { - fields.append(COLS.get("total_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getTotalSize())); - } - if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { - fields.append(COLS.get("guar_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getGuaranteedSize())); - } - if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { - fields.append(COLS.get("free_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getFreeSize())); - } - if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { - fields.append(COLS.get("used_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getUsedSize())); - } - if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { - fields.append(COLS.get("busy_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getBusySize())); - } - if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { - fields.append(COLS.get("unavailable_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getUnavailableSize())); - } - - if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { - fields.append(COLS.get("available_size") + (",")); - placeholders.append("?,"); - 
values.add(format(ssTO.getAvailableSize())); - } - if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { - fields.append(COLS.get("reserved_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getReservedSize())); - } - if (ssTO.getUpdateTime() != null) { - fields.append(COLS.get("update_time").concat(",")); - placeholders.append("?,"); - values.add(format(ssTO.getUpdateTime())); - } - } - - fields.deleteCharAt(fields.length() - 1); - fields.append(")"); - placeholders.deleteCharAt(placeholders.length() - 1); - placeholders.append(")"); - - String str = "INSERT INTO " + TABLE_NAME + " " + fields.toString() - + " VALUES " + placeholders.toString(); - PreparedStatement preparedStatement = conn.prepareStatement(str); - - int index = 1; - for (String val : values) { - preparedStatement.setString(index, val); - index++; - } - - return preparedStatement; - } - - /** - * Create a StorageSpace Transfer Object coming from Result Set - * - * @param res - * ResultSet - * @return StorageSpaceTO - */ - public StorageSpaceTO makeStorageSpaceTO(ResultSet res) { - - StorageSpaceTO ssTO = new StorageSpaceTO(); - - try { - ssTO.setStorageSpaceId(new Long(res.getLong("SS_ID"))); - - ssTO.setOwnerName(res.getString("USERDN")); - ssTO.setVoName(res.getString("VOGROUP")); - ssTO.setAlias(res.getString("ALIAS")); - ssTO.setSpaceToken(res.getString("SPACE_TOKEN")); - - java.sql.Timestamp createdTimeStamp = res.getTimestamp("CREATED"); - Date creationDate = new Date(createdTimeStamp.getTime()); - ssTO.setCreated(creationDate); - - ssTO.setSpaceFile(res.getString("SPACE_FILE")); - ssTO.setStorageInfo(res.getString("STORAGE_INFO")); - long tempLong = res.getLong("LIFETIME"); - if (!res.wasNull()) { - ssTO.setLifetime(tempLong); - } - - ssTO.setSpaceType(res.getString("SPACE_TYPE")); - - // Sizes - tempLong = res.getLong("TOTAL_SIZE"); - if (!res.wasNull()) { - ssTO.setTotalSize(tempLong); - } - tempLong = res.getLong("GUAR_SIZE"); - if (!res.wasNull()) { - 
ssTO.setGuaranteedSize(tempLong); - } - tempLong = res.getLong("RESERVED_SIZE"); - if (!res.wasNull()) { - ssTO.setReservedSize(tempLong); - } - tempLong = res.getLong("FREE_SIZE"); - if (!res.wasNull()) { - ssTO.setFreeSize(tempLong); - } - tempLong = res.getLong("AVAILABLE_SIZE"); - if (!res.wasNull()) { - ssTO.setAvailableSize(tempLong); - } - tempLong = res.getLong("USED_SIZE"); - if (!res.wasNull()) { - ssTO.setUsedSize(tempLong); - } - tempLong = res.getLong("BUSY_SIZE"); - if (!res.wasNull()) { - ssTO.setBusySize(tempLong); - } - tempLong = res.getLong("UNAVAILABLE_SIZE"); - if (!res.wasNull()) { - ssTO.setUnavailableSize(tempLong); - } - - // Last Update - java.sql.Timestamp updatedTimeStamp = res.getTimestamp("UPDATE_TIME"); - Date updateDate = new Date(updatedTimeStamp.getTime()); - ssTO.setUpdateTime(updateDate); - - } catch (SQLException ex) { - ex.printStackTrace(); - } - - return ssTO; - } - - // ************ HELPER Method *************** // - - /** - * @param vo - * @return - */ - private String getVOName(String vo) { - - String voStr = VO.makeNoVo().getValue(); - if (vo != null && !vo.trim().equals("")) { - voStr = vo.trim(); - } - return voStr; - } - - /** - * - * - * @param token - * String - * @param conn - * @return String - * @throws SQLException - */ - public PreparedStatement selectByTokenQuery(Connection conn, String token) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where space_token=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, token); - - return preparedStatement; - } - - /** - * Returns the SQL string for selecting all columns from the table - * 'storage_space' in the 'storm_be_ISAM' database matching 'user' and - * 'spaceAlias'. 'spaceAlias' can be NULL or empty. - * - * @param user - * VomsGridUser. - * @param spaceAlias - * String. - * @return String. 
- * @throws SQLException - */ - public PreparedStatement selectBySpaceAliasQuery(Connection conn, - GridUserInterface user, String spaceAlias) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - String dn = user.getDn(); - - if ((spaceAlias == null) || (spaceAlias.length() == 0)) { - str = "SELECT * FROM storage_space where userdn=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, dn); - } else { - str = "SELECT * FROM storage_space where userdn=? AND alias=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, dn); - preparedStatement.setString(2, spaceAlias); - } - return preparedStatement; - } - - /** - * Returns the SQL string for selecting all columns from the table - * 'storage_space' in the 'storm_be_ISAM' database matching 'user' and - * 'spaceAlias'. 'spaceAlias' can be NULL or empty. - * - * @param user - * VomsGridUser. - * @param spaceAlias - * String. - * @return String. - * @throws SQLException - */ - public PreparedStatement selectBySpaceAliasOnlyQuery(Connection conn, - String spaceAlias) throws SQLException { - - /* - * This is to distinguish a client reseve space with a VOSpaceArea both with - * the same token. Only the one made by the namespace process contains a - * fake dn - */ - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where alias=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, spaceAlias); - - return preparedStatement; - } - - /** - * Returns the SQL string for selecting all columns from the table - * 'storage_space' in the 'storm_be_ISAM' database matching 'voname'. - * - * @param voname - * string - * @return String. 
- * @throws SQLException - */ - - public PreparedStatement selectBySpaceType(Connection conn, String voname) - throws SQLException { - - /* - * This is to distinguish a client reseve space with a VOSpaceArea both with - * the same token. Only the one made by the namespace process contains a - * fake dn - */ - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where SPACE_TYPE=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, voname); - - return preparedStatement; - } - - /** - * This method return the SQL query to evaluate all expired space reservation - * requests. - * - * @param time - * Current time (in second) to compare to the reservationTime + - * lifetime - * @return String SQL query - * @throws SQLException - */ - public PreparedStatement selectExpiredQuery(Connection conn, - long currentTimeInSecond) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where lifetime is not null and (UNIX_TIMESTAMP(created)+lifetime< ?)"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setLong(1, currentTimeInSecond); - - return preparedStatement; - - } - - /** - * @param size - * @return - * @throws SQLException - */ - public PreparedStatement selectByUnavailableUsedSpaceSizeQuery( - Connection conn, long unavailableSizeValue) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where " + COLS.get("used_size") - + " IS NULL or " + COLS.get("used_size") + "=?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setLong(1, unavailableSizeValue); - - return preparedStatement; - } - - /** - * @param lastUpdateTimestamp - * @return - * @throws SQLException - */ - - public PreparedStatement selectByPreviousOrNullLastUpdateQuery( - Connection conn, long lastUpdateTimestamp) throws SQLException { 
- - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where " + COLS.get("update_time") - + " IS NULL or UNIX_TIMESTAMP(" + COLS.get("update_time") + ") < ?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setLong(1, lastUpdateTimestamp); - - return preparedStatement; - - } - - /** - * Returns the SQL query for removing a row from the table 'storage_space' in - * the 'storm_be_ISAM' database matching 'userDN' and 'spaceToken'. - * - * @param user - * @param spaceToken - * @return - * @throws SQLException - */ - public PreparedStatement removeByTokenQuery(Connection conn, - GridUserInterface user, String spaceToken) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "DELETE FROM storage_space WHERE ((USERDN=?) AND (SPACE_TOKEN=?))"; - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, user.getDn()); - preparedStatement.setString(2, spaceToken); - - return preparedStatement; - } - - /** - * Returns the SQL query for removing a row from the table 'storage_space' in - * the 'storm_be_ISAM' database matching 'spaceToken'. 
- * - * @param spaceToken - * @return - * @throws SQLException - */ - public PreparedStatement removeByTokenQuery(Connection conn, String spaceToken) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "DELETE FROM storage_space WHERE (SPACE_TOKEN=?)"; - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, spaceToken); - - return preparedStatement; - } - - /** - * Provides a query that updates all row fields accordingly to the provided - * StorageSpaceTO - * - * @param ssTO - * @return - * @throws IllegalArgumentException - * @throws SQLException - */ - public PreparedStatement updateByAliasAndTokenQuery(Connection conn, - StorageSpaceTO ssTO) throws IllegalArgumentException, SQLException { - - List values = new LinkedList(); - - if (ssTO == null) { - throw new IllegalArgumentException(); - } - String query = "UPDATE storage_space SET"; - if (ssTO.getOwnerName() != null) { - query += " " + COLS.get("ownerName") + " = ?" + " ,"; - values.add(format(ssTO.getOwnerName())); - } - - query += " " + COLS.get("ownerVO") + " = ?" + " ,"; - values.add(format(getVOName(ssTO.getVoName()))); - - if (ssTO.getCreated() != null) { - query += " " + COLS.get("created") + " = ?" + " ,"; - values.add(format(ssTO.getCreated())); - } - if (ssTO.getSpaceFile() != null) { - query += " " + COLS.get("spaceFile") + " = ?" + " ,"; - values.add(format(ssTO.getSpaceFile())); - } - if (ssTO.getStorageInfo() != null) { - query += " " + COLS.get("storaqeInfo") + " = ?" + " ,"; - values.add(format(ssTO.getStorageInfo())); - } - if (ssTO.getLifetime() != -1) { - query += " " + COLS.get("lifeTime") + " = ?" + " ,"; - values.add(format(ssTO.getLifetime())); - } - if (ssTO.getSpaceType() != null) { - query += " " + COLS.get("spaceType") + " = ?" + " ,"; - values.add(format(ssTO.getSpaceType())); - } - if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { - query += " " + COLS.get("total_size") + " = ?" 
+ " ,"; - values.add(format(ssTO.getTotalSize())); - } - if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { - query += " " + COLS.get("guar_size") + " = ?" + " ,"; - values.add(format(ssTO.getGuaranteedSize())); - } - if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { - query += " " + COLS.get("free_size") + " = ?" + " ,"; - values.add(format(ssTO.getFreeSize())); - } - if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { - query += " " + COLS.get("used_size") + " = ?" + " ,"; - values.add(format(ssTO.getUsedSize())); - } - if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { - query += " " + COLS.get("busy_size") + " = ?" + " ,"; - values.add(format(ssTO.getBusySize())); - } - if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { - query += " " + COLS.get("unavailable_size") + " = ?" + " ,"; - values.add(format(ssTO.getUnavailableSize())); - } - if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { - query += " " + COLS.get("available_size") + " = ?" + " ,"; - values.add(format(ssTO.getAvailableSize())); - } - if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { - query += " " + COLS.get("reserved_size") + " = ?" + " ,"; - values.add(format(ssTO.getReservedSize())); - } - if (ssTO.getUpdateTime() != null) { - query += " " + COLS.get("update_time") + " = ?" + " ,"; - values.add(format(ssTO.getUpdateTime())); - } - if (query.charAt(query.length() - 1) == ',') { - query = query.substring(0, query.length() - 1); - } - query += " where " + COLS.get("alias") + " = ?" 
+ " and " + COLS.get("token") + " = ?"; - - values.add(format(ssTO.getAlias())); - values.add(format(ssTO.getSpaceToken())); - - PreparedStatement preparedStatement = conn.prepareStatement(query); - - int index = 1; - for (String val : values) { - preparedStatement.setString(index, val); - index++; - } - - return preparedStatement; - } - - /** - * Provides a query that updates all row fields accordingly to the provided - * StorageSpaceTO and using SpaceToken as key - * - * @param ssTO - * @return - * @throws IllegalArgumentException - * @throws SQLException - */ - public PreparedStatement updateByTokenQuery(Connection conn, - StorageSpaceTO ssTO) throws IllegalArgumentException, SQLException { - - List values = new LinkedList(); - - if (ssTO == null) { - throw new IllegalArgumentException(); - } - String query = "UPDATE storage_space SET"; - if (ssTO.getOwnerName() != null) { - query += " " + COLS.get("ownerName") + " = ?" + " ,"; - values.add(format(ssTO.getOwnerName())); - } - - query += " " + COLS.get("ownerVO") + " = ?" + " ,"; - values.add((getVOName(ssTO.getVoName()))); - - if (ssTO.getCreated() != null) { - query += " " + COLS.get("created") + " = ?" + " ,"; - values.add(format(ssTO.getCreated())); - } - if (ssTO.getAlias() != null) { - query += " " + COLS.get("alias") + " = ?" + " ,"; - values.add(format(ssTO.getAlias())); - } - if (ssTO.getSpaceFile() != null) { - query += " " + COLS.get("spaceFile") + " = ?" + " ,"; - values.add(format(ssTO.getSpaceFile())); - } - if (ssTO.getStorageInfo() != null) { - query += " " + COLS.get("storaqeInfo") + " = ?" + " ,"; - values.add(format(ssTO.getStorageInfo())); - } - if (ssTO.getLifetime() != -1) { - query += " " + COLS.get("lifeTime") + " = ?" + " ,"; - values.add(format(ssTO.getLifetime())); - } - if (ssTO.getSpaceType() != null) { - query += " " + COLS.get("spaceType") + " = ?" 
+ " ,"; - values.add(format(ssTO.getSpaceType())); - } - if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { - query += " " + COLS.get("total_size") + " = ?" + " ,"; - values.add(format(ssTO.getTotalSize())); - } - if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { - query += " " + COLS.get("guar_size") + " = ?" + " ,"; - values.add(format(ssTO.getGuaranteedSize())); - } - if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { - query += " " + COLS.get("free_size") + " = ?" + " ,"; - values.add(format(ssTO.getFreeSize())); - } - if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { - query += " " + COLS.get("used_size") + " = ?" + " ,"; - values.add(format(ssTO.getUsedSize())); - } - if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { - query += " " + COLS.get("busy_size") + " = ?" + " ,"; - values.add(format(ssTO.getBusySize())); - } - if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { - query += " " + COLS.get("unavailable_size") + " = ?" + " ,"; - values.add(format(ssTO.getUnavailableSize())); - } - if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { - query += " " + COLS.get("available_size") + " = ?" + " ,"; - values.add(format(ssTO.getAvailableSize())); - } - if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { - query += " " + COLS.get("reserved_size") + " = ?" + " ,"; - values.add(format(ssTO.getReservedSize())); - } - if (ssTO.getUpdateTime() != null) { - query += " " + COLS.get("update_time") + " = ?" 
+ " ,"; - values.add(format(ssTO.getUpdateTime())); - } - if (query.charAt(query.length() - 1) == ',') { - query = query.substring(0, query.length() - 1); - } - query += " where " + COLS.get("token") + " = ?"; - - values.add(format(format(ssTO.getSpaceToken()))); - - PreparedStatement preparedStatement = conn.prepareStatement(query); - - int index = 1; - for (String val : values) { - preparedStatement.setString(index, val); - index++; - } - - return preparedStatement; - } - - /** - * - * @param token - * String - * @param freeSpace - * long - * @return String - * @throws SQLException - */ - public PreparedStatement updateFreeSpaceByTokenQuery(Connection conn, - String token, long freeSpace, Date updateTimestamp) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "UPDATE storage_space SET free_size=?" + " , " + "UPDATE_TIME=?" - + " WHERE space_token=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setLong(1, freeSpace); - preparedStatement.setString(2, format(updateTimestamp)); - preparedStatement.setString(3, token); - - return preparedStatement; - } - - public PreparedStatement increaseUsedSpaceByTokenQuery(Connection conn, - String token, long usedSpaceToAdd) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "UPDATE storage_space " - + " SET USED_SIZE = USED_SIZE + ?, BUSY_SIZE = BUSY_SIZE + ?, " - + " FREE_SIZE = FREE_SIZE - ?, AVAILABLE_SIZE = AVAILABLE_SIZE - ?, " - + " UPDATE_TIME = NOW() " - + " WHERE space_token=? AND USED_SIZE + ? 
<= TOTAL_SIZE "; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setLong(1, usedSpaceToAdd); - preparedStatement.setLong(2, usedSpaceToAdd); - preparedStatement.setLong(3, usedSpaceToAdd); - preparedStatement.setLong(4, usedSpaceToAdd); - preparedStatement.setString(5, token); - preparedStatement.setLong(6, usedSpaceToAdd); - - return preparedStatement; - - } - - public PreparedStatement decreaseUsedSpaceByTokenQuery(Connection conn, - String token, long usedSpaceToRemove) - throws SQLException { + private final static String TABLE_NAME = "storage_space"; + private final static HashMap COLS = new HashMap(); + + private static final String[] COLUMN_NAMES = + {"SS_ID", "USERDN", "VOGROUP", "ALIAS", "SPACE_TOKEN", "CREATED", "TOTAL_SIZE", "GUAR_SIZE", + "FREE_SIZE", "SPACE_FILE", "STORAGE_INFO", "LIFETIME", "SPACE_TYPE", "USED_SIZE", + "BUSY_SIZE", "UNAVAILABLE_SIZE", "AVAILABLE_SIZE", "RESERVED_SIZE", "UPDATE_TIME"}; + + static { + COLS.put("storageSpaceId", "SS_ID"); + COLS.put("ownerName", "USERDN"); + COLS.put("ownerVO", "VOGROUP"); + COLS.put("alias", "ALIAS"); + COLS.put("token", "SPACE_TOKEN"); + COLS.put("created", "CREATED"); + COLS.put("spaceFile", "SPACE_FILE"); + COLS.put("storaqeInfo", "STORAGE_INFO"); + COLS.put("lifeTime", "LIFETIME"); + COLS.put("spaceType", "SPACE_TYPE"); + COLS.put("total_size", "TOTAL_SIZE"); + COLS.put("guar_size", "GUAR_SIZE"); + COLS.put("free_size", "FREE_SIZE"); + COLS.put("used_size", "USED_SIZE"); + COLS.put("busy_size", "BUSY_SIZE"); + COLS.put("unavailable_size", "UNAVAILABLE_SIZE"); + COLS.put("available_size", "AVAILABLE_SIZE"); + COLS.put("reserved_size", "RESERVED_SIZE"); + COLS.put("update_time", "UPDATE_TIME"); + } + + /** + * + * @return String[] + */ + public String[] getColumnNames() { + + return COLUMN_NAMES; + } + + /** + * INSERT NEW ROW into TABLE + * + * @param ssTO StorageSpaceTO + * @return String + * @throws SQLException + */ + + public PreparedStatement insertQuery(Connection 
conn, StorageSpaceTO ssTO) throws SQLException { + + List values = Lists.newLinkedList(); + + StringBuilder fields = new StringBuilder("("); + StringBuilder placeholders = new StringBuilder("("); + + if (ssTO != null) { + if (ssTO.getOwnerName() != null) { + fields.append(COLS.get("ownerName") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getOwnerName())); + } + + fields.append(COLS.get("ownerVO") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getVoName())); + + if (ssTO.getAlias() != null) { + fields.append(COLS.get("alias") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getAlias())); + } + if (ssTO.getSpaceToken() != null) { + fields.append(COLS.get("token") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getSpaceToken())); + } + if (ssTO.getCreated() != null) { + fields.append(COLS.get("created") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getCreated())); + } + if (ssTO.getSpaceFile() != null) { + fields.append(COLS.get("spaceFile") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getSpaceFile())); + } + if (ssTO.getStorageInfo() != null) { + fields.append(COLS.get("storaqeInfo") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getStorageInfo())); + } + if (ssTO.getLifetime() != -1) { + fields.append(COLS.get("lifeTime") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getLifetime())); + } + if (ssTO.getSpaceType() != null) { + fields.append(COLS.get("spaceType") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getSpaceType())); + } + if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { + fields.append(COLS.get("total_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getTotalSize())); + } + if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { + fields.append(COLS.get("guar_size") + (",")); + placeholders.append("?,"); + 
values.add(format(ssTO.getGuaranteedSize())); + } + if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { + fields.append(COLS.get("free_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getFreeSize())); + } + if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { + fields.append(COLS.get("used_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getUsedSize())); + } + if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { + fields.append(COLS.get("busy_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getBusySize())); + } + if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { + fields.append(COLS.get("unavailable_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getUnavailableSize())); + } + + if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { + fields.append(COLS.get("available_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getAvailableSize())); + } + if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { + fields.append(COLS.get("reserved_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getReservedSize())); + } + if (ssTO.getUpdateTime() != null) { + fields.append(COLS.get("update_time").concat(",")); + placeholders.append("?,"); + values.add(format(ssTO.getUpdateTime())); + } + } - String str = null; - PreparedStatement preparedStatement = null; + fields.deleteCharAt(fields.length() - 1); + fields.append(")"); + placeholders.deleteCharAt(placeholders.length() - 1); + placeholders.append(")"); - str = "UPDATE storage_space " - + " SET USED_SIZE = USED_SIZE - ?, BUSY_SIZE = BUSY_SIZE - ?, " - + " FREE_SIZE = FREE_SIZE + ?, AVAILABLE_SIZE = AVAILABLE_SIZE + ?, " - + " UPDATE_TIME = NOW() " - + " WHERE space_token=? AND USED_SIZE - ? 
>= 0 "; + String str = "INSERT INTO " + TABLE_NAME + " " + fields.toString() + " VALUES " + + placeholders.toString(); + PreparedStatement preparedStatement = conn.prepareStatement(str); + + int index = 1; + for (String val : values) { + preparedStatement.setString(index, val); + index++; + } + + return preparedStatement; + } + + /** + * Create a StorageSpace Transfer Object coming from Result Set + * + * @param res ResultSet + * @return StorageSpaceTO + */ + public StorageSpaceTO makeStorageSpaceTO(ResultSet res) { + + StorageSpaceTO ssTO = new StorageSpaceTO(); + + try { + ssTO.setStorageSpaceId(Long.valueOf(res.getLong("SS_ID"))); + + ssTO.setOwnerName(res.getString("USERDN")); + ssTO.setVoName(res.getString("VOGROUP")); + ssTO.setAlias(res.getString("ALIAS")); + ssTO.setSpaceToken(res.getString("SPACE_TOKEN")); + + java.sql.Timestamp createdTimeStamp = res.getTimestamp("CREATED"); + Date creationDate = new Date(createdTimeStamp.getTime()); + ssTO.setCreated(creationDate); + + ssTO.setSpaceFile(res.getString("SPACE_FILE")); + ssTO.setStorageInfo(res.getString("STORAGE_INFO")); + long tempLong = res.getLong("LIFETIME"); + if (!res.wasNull()) { + ssTO.setLifetime(tempLong); + } + + ssTO.setSpaceType(res.getString("SPACE_TYPE")); + + // Sizes + tempLong = res.getLong("TOTAL_SIZE"); + if (!res.wasNull()) { + ssTO.setTotalSize(tempLong); + } + tempLong = res.getLong("GUAR_SIZE"); + if (!res.wasNull()) { + ssTO.setGuaranteedSize(tempLong); + } + tempLong = res.getLong("RESERVED_SIZE"); + if (!res.wasNull()) { + ssTO.setReservedSize(tempLong); + } + tempLong = res.getLong("FREE_SIZE"); + if (!res.wasNull()) { + ssTO.setFreeSize(tempLong); + } + tempLong = res.getLong("AVAILABLE_SIZE"); + if (!res.wasNull()) { + ssTO.setAvailableSize(tempLong); + } + tempLong = res.getLong("USED_SIZE"); + if (!res.wasNull()) { + ssTO.setUsedSize(tempLong); + } + tempLong = res.getLong("BUSY_SIZE"); + if (!res.wasNull()) { + ssTO.setBusySize(tempLong); + } + tempLong = 
res.getLong("UNAVAILABLE_SIZE"); + if (!res.wasNull()) { + ssTO.setUnavailableSize(tempLong); + } + + // Last Update + java.sql.Timestamp updatedTimeStamp = res.getTimestamp("UPDATE_TIME"); + Date updateDate = new Date(updatedTimeStamp.getTime()); + ssTO.setUpdateTime(updateDate); + + } catch (SQLException ex) { + ex.printStackTrace(); + } + return ssTO; + } + + // ************ HELPER Method *************** // + + /** + * @param vo + * @return + */ + private String getVOName(String vo) { + + String voStr = VO.makeNoVo().getValue(); + if (vo != null && !vo.trim().equals("")) { + voStr = vo.trim(); + } + return voStr; + } + + /** + * + * + * @param token String + * @param conn + * @return String + * @throws SQLException + */ + public PreparedStatement selectByTokenQuery(Connection conn, String token) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM storage_space where space_token=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, token); + + return preparedStatement; + } + + /** + * Returns the SQL string for selecting all columns from the table 'storage_space' in the + * 'storm_be_ISAM' database matching 'user' and 'spaceAlias'. 'spaceAlias' can be NULL or empty. + * + * @param user VomsGridUser. + * @param spaceAlias String. + * @return String. + * @throws SQLException + */ + public PreparedStatement selectBySpaceAliasQuery(Connection conn, GridUserInterface user, + String spaceAlias) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + String dn = user.getDn(); + + if ((spaceAlias == null) || (spaceAlias.length() == 0)) { + str = "SELECT * FROM storage_space where userdn=?"; preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, dn); + } else { + str = "SELECT * FROM storage_space where userdn=? 
AND alias=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, dn); + preparedStatement.setString(2, spaceAlias); + } + return preparedStatement; + } + + /** + * Returns the SQL string for selecting all columns from the table 'storage_space' in the + * 'storm_be_ISAM' database matching 'user' and 'spaceAlias'. 'spaceAlias' can be NULL or empty. + * + * @param user VomsGridUser. + * @param spaceAlias String. + * @return String. + * @throws SQLException + */ + public PreparedStatement selectBySpaceAliasOnlyQuery(Connection conn, String spaceAlias) + throws SQLException { + + /* + * This is to distinguish a client reseve space with a VOSpaceArea both with the same token. + * Only the one made by the namespace process contains a fake dn + */ + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM storage_space where alias=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, spaceAlias); + + return preparedStatement; + } + + /** + * Returns the SQL string for selecting all columns from the table 'storage_space' in the + * 'storm_be_ISAM' database matching 'voname'. + * + * @param voname string + * @return String. + * @throws SQLException + */ + + public PreparedStatement selectBySpaceType(Connection conn, String voname) throws SQLException { + + /* + * This is to distinguish a client reseve space with a VOSpaceArea both with the same token. + * Only the one made by the namespace process contains a fake dn + */ + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM storage_space where SPACE_TYPE=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, voname); + + return preparedStatement; + } + + /** + * This method return the SQL query to evaluate all expired space reservation requests. 
+ * + * @param time Current time (in second) to compare to the reservationTime + lifetime + * @return String SQL query + * @throws SQLException + */ + public PreparedStatement selectExpiredQuery(Connection conn, long currentTimeInSecond) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = + "SELECT * FROM storage_space where lifetime is not null and (UNIX_TIMESTAMP(created)+lifetime< ?)"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setLong(1, currentTimeInSecond); + + return preparedStatement; + + } + + /** + * @param size + * @return + * @throws SQLException + */ + public PreparedStatement selectByUnavailableUsedSpaceSizeQuery(Connection conn, + long unavailableSizeValue) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; - preparedStatement.setLong(1, usedSpaceToRemove); - preparedStatement.setLong(2, usedSpaceToRemove); - preparedStatement.setLong(3, usedSpaceToRemove); - preparedStatement.setLong(4, usedSpaceToRemove); - preparedStatement.setString(5, token); - preparedStatement.setLong(6, usedSpaceToRemove); + str = "SELECT * FROM storage_space where " + COLS.get("used_size") + " IS NULL or " + + COLS.get("used_size") + "=?"; - return preparedStatement; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setLong(1, unavailableSizeValue); + return preparedStatement; + } + + /** + * @param lastUpdateTimestamp + * @return + * @throws SQLException + */ + + public PreparedStatement selectByPreviousOrNullLastUpdateQuery(Connection conn, + long lastUpdateTimestamp) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM storage_space where " + COLS.get("update_time") + + " IS NULL or UNIX_TIMESTAMP(" + COLS.get("update_time") + ") < ?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setLong(1, lastUpdateTimestamp); + + return preparedStatement; + + } 
+ + /** + * Returns the SQL query for removing a row from the table 'storage_space' in the 'storm_be_ISAM' + * database matching 'userDN' and 'spaceToken'. + * + * @param user + * @param spaceToken + * @return + * @throws SQLException + */ + public PreparedStatement removeByTokenQuery(Connection conn, GridUserInterface user, + String spaceToken) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "DELETE FROM storage_space WHERE ((USERDN=?) AND (SPACE_TOKEN=?))"; + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, user.getDn()); + preparedStatement.setString(2, spaceToken); + + return preparedStatement; + } + + /** + * Returns the SQL query for removing a row from the table 'storage_space' in the 'storm_be_ISAM' + * database matching 'spaceToken'. + * + * @param spaceToken + * @return + * @throws SQLException + */ + public PreparedStatement removeByTokenQuery(Connection conn, String spaceToken) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "DELETE FROM storage_space WHERE (SPACE_TOKEN=?)"; + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, spaceToken); + + return preparedStatement; + } + + /** + * Provides a query that updates all row fields accordingly to the provided StorageSpaceTO + * + * @param ssTO + * @return + * @throws IllegalArgumentException + * @throws SQLException + */ + public PreparedStatement updateByAliasAndTokenQuery(Connection conn, StorageSpaceTO ssTO) + throws IllegalArgumentException, SQLException { + + List values = new LinkedList(); + + if (ssTO == null) { + throw new IllegalArgumentException(); + } + String query = "UPDATE storage_space SET"; + if (ssTO.getOwnerName() != null) { + query += " " + COLS.get("ownerName") + " = ?" + " ,"; + values.add(format(ssTO.getOwnerName())); + } + + query += " " + COLS.get("ownerVO") + " = ?" 
+ " ,"; + values.add(format(getVOName(ssTO.getVoName()))); + + if (ssTO.getCreated() != null) { + query += " " + COLS.get("created") + " = ?" + " ,"; + values.add(format(ssTO.getCreated())); + } + if (ssTO.getSpaceFile() != null) { + query += " " + COLS.get("spaceFile") + " = ?" + " ,"; + values.add(format(ssTO.getSpaceFile())); + } + if (ssTO.getStorageInfo() != null) { + query += " " + COLS.get("storaqeInfo") + " = ?" + " ,"; + values.add(format(ssTO.getStorageInfo())); + } + if (ssTO.getLifetime() != -1) { + query += " " + COLS.get("lifeTime") + " = ?" + " ,"; + values.add(format(ssTO.getLifetime())); + } + if (ssTO.getSpaceType() != null) { + query += " " + COLS.get("spaceType") + " = ?" + " ,"; + values.add(format(ssTO.getSpaceType())); + } + if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { + query += " " + COLS.get("total_size") + " = ?" + " ,"; + values.add(format(ssTO.getTotalSize())); } + if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { + query += " " + COLS.get("guar_size") + " = ?" + " ,"; + values.add(format(ssTO.getGuaranteedSize())); + } + if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { + query += " " + COLS.get("free_size") + " = ?" + " ,"; + values.add(format(ssTO.getFreeSize())); + } + if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { + query += " " + COLS.get("used_size") + " = ?" + " ,"; + values.add(format(ssTO.getUsedSize())); + } + if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { + query += " " + COLS.get("busy_size") + " = ?" + " ,"; + values.add(format(ssTO.getBusySize())); + } + if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { + query += " " + COLS.get("unavailable_size") + " = ?" + " ,"; + values.add(format(ssTO.getUnavailableSize())); + } + if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { + query += " " + COLS.get("available_size") + " = ?" 
+ " ,"; + values.add(format(ssTO.getAvailableSize())); + } + if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { + query += " " + COLS.get("reserved_size") + " = ?" + " ,"; + values.add(format(ssTO.getReservedSize())); + } + if (ssTO.getUpdateTime() != null) { + query += " " + COLS.get("update_time") + " = ?" + " ,"; + values.add(format(ssTO.getUpdateTime())); + } + if (query.charAt(query.length() - 1) == ',') { + query = query.substring(0, query.length() - 1); + } + query += " where " + COLS.get("alias") + " = ?" + " and " + COLS.get("token") + " = ?"; + + values.add(format(ssTO.getAlias())); + values.add(format(ssTO.getSpaceToken())); + + PreparedStatement preparedStatement = conn.prepareStatement(query); + + int index = 1; + for (String val : values) { + preparedStatement.setString(index, val); + index++; + } + + return preparedStatement; + } + + /** + * Provides a query that updates all row fields accordingly to the provided StorageSpaceTO and + * using SpaceToken as key + * + * @param ssTO + * @return + * @throws IllegalArgumentException + * @throws SQLException + */ + public PreparedStatement updateByTokenQuery(Connection conn, StorageSpaceTO ssTO) + throws IllegalArgumentException, SQLException { + + List values = new LinkedList(); + + if (ssTO == null) { + throw new IllegalArgumentException(); + } + String query = "UPDATE storage_space SET"; + if (ssTO.getOwnerName() != null) { + query += " " + COLS.get("ownerName") + " = ?" + " ,"; + values.add(format(ssTO.getOwnerName())); + } + + query += " " + COLS.get("ownerVO") + " = ?" + " ,"; + values.add((getVOName(ssTO.getVoName()))); + + if (ssTO.getCreated() != null) { + query += " " + COLS.get("created") + " = ?" + " ,"; + values.add(format(ssTO.getCreated())); + } + if (ssTO.getAlias() != null) { + query += " " + COLS.get("alias") + " = ?" + " ,"; + values.add(format(ssTO.getAlias())); + } + if (ssTO.getSpaceFile() != null) { + query += " " + COLS.get("spaceFile") + " = ?" 
+ " ,"; + values.add(format(ssTO.getSpaceFile())); + } + if (ssTO.getStorageInfo() != null) { + query += " " + COLS.get("storaqeInfo") + " = ?" + " ,"; + values.add(format(ssTO.getStorageInfo())); + } + if (ssTO.getLifetime() != -1) { + query += " " + COLS.get("lifeTime") + " = ?" + " ,"; + values.add(format(ssTO.getLifetime())); + } + if (ssTO.getSpaceType() != null) { + query += " " + COLS.get("spaceType") + " = ?" + " ,"; + values.add(format(ssTO.getSpaceType())); + } + if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { + query += " " + COLS.get("total_size") + " = ?" + " ,"; + values.add(format(ssTO.getTotalSize())); + } + if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { + query += " " + COLS.get("guar_size") + " = ?" + " ,"; + values.add(format(ssTO.getGuaranteedSize())); + } + if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { + query += " " + COLS.get("free_size") + " = ?" + " ,"; + values.add(format(ssTO.getFreeSize())); + } + if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { + query += " " + COLS.get("used_size") + " = ?" + " ,"; + values.add(format(ssTO.getUsedSize())); + } + if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { + query += " " + COLS.get("busy_size") + " = ?" + " ,"; + values.add(format(ssTO.getBusySize())); + } + if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { + query += " " + COLS.get("unavailable_size") + " = ?" + " ,"; + values.add(format(ssTO.getUnavailableSize())); + } + if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { + query += " " + COLS.get("available_size") + " = ?" + " ,"; + values.add(format(ssTO.getAvailableSize())); + } + if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { + query += " " + COLS.get("reserved_size") + " = ?" + " ,"; + values.add(format(ssTO.getReservedSize())); + } + if (ssTO.getUpdateTime() != null) { + query += " " + COLS.get("update_time") + " = ?" 
+ " ,"; + values.add(format(ssTO.getUpdateTime())); + } + if (query.charAt(query.length() - 1) == ',') { + query = query.substring(0, query.length() - 1); + } + query += " where " + COLS.get("token") + " = ?"; + + values.add(format(format(ssTO.getSpaceToken()))); + + PreparedStatement preparedStatement = conn.prepareStatement(query); + + int index = 1; + for (String val : values) { + preparedStatement.setString(index, val); + index++; + } + + return preparedStatement; + } + + /** + * + * @param token String + * @param freeSpace long + * @return String + * @throws SQLException + */ + public PreparedStatement updateFreeSpaceByTokenQuery(Connection conn, String token, + long freeSpace, Date updateTimestamp) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE storage_space SET free_size=?" + " , " + "UPDATE_TIME=?" + " WHERE space_token=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setLong(1, freeSpace); + preparedStatement.setString(2, format(updateTimestamp)); + preparedStatement.setString(3, token); + + return preparedStatement; + } + + public PreparedStatement increaseUsedSpaceByTokenQuery(Connection conn, String token, + long usedSpaceToAdd) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE storage_space " + " SET USED_SIZE = USED_SIZE + ?, BUSY_SIZE = BUSY_SIZE + ?, " + + " FREE_SIZE = FREE_SIZE - ?, AVAILABLE_SIZE = AVAILABLE_SIZE - ?, " + + " UPDATE_TIME = NOW() " + " WHERE space_token=? AND USED_SIZE + ? 
<= TOTAL_SIZE "; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setLong(1, usedSpaceToAdd); + preparedStatement.setLong(2, usedSpaceToAdd); + preparedStatement.setLong(3, usedSpaceToAdd); + preparedStatement.setLong(4, usedSpaceToAdd); + preparedStatement.setString(5, token); + preparedStatement.setLong(6, usedSpaceToAdd); + + return preparedStatement; + + } + + public PreparedStatement decreaseUsedSpaceByTokenQuery(Connection conn, String token, + long usedSpaceToRemove) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE storage_space " + " SET USED_SIZE = USED_SIZE - ?, BUSY_SIZE = BUSY_SIZE - ?, " + + " FREE_SIZE = FREE_SIZE + ?, AVAILABLE_SIZE = AVAILABLE_SIZE + ?, " + + " UPDATE_TIME = NOW() " + " WHERE space_token=? AND USED_SIZE - ? >= 0 "; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setLong(1, usedSpaceToRemove); + preparedStatement.setLong(2, usedSpaceToRemove); + preparedStatement.setLong(3, usedSpaceToRemove); + preparedStatement.setLong(4, usedSpaceToRemove); + preparedStatement.setString(5, token); + preparedStatement.setLong(6, usedSpaceToRemove); + + return preparedStatement; + + } } diff --git a/src/main/java/it/grid/storm/persistence/util/helper/TapeRecallMySQLHelper.java b/src/main/java/it/grid/storm/persistence/util/helper/TapeRecallMySQLHelper.java index f912c47f8..5f682baf0 100644 --- a/src/main/java/it/grid/storm/persistence/util/helper/TapeRecallMySQLHelper.java +++ b/src/main/java/it/grid/storm/persistence/util/helper/TapeRecallMySQLHelper.java @@ -17,8 +17,8 @@ package it.grid.storm.persistence.util.helper; +import it.grid.storm.persistence.model.SQLHelper; import it.grid.storm.persistence.model.TapeRecallTO; -import it.grid.storm.persistence.util.db.SQLHelper; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; import java.sql.Connection; @@ -30,560 +30,538 @@ public class TapeRecallMySQLHelper extends SQLHelper { - 
private static final String TABLE_NAME = "tape_recall"; - - // primary key COL_TASK_ID + COL_REQUEST_TOKEN - public static final String COL_TASK_ID = "taskId"; - public static final String COL_REQUEST_TOKEN = "requestToken"; - public static final String COL_REQUEST_TYPE = "requestType"; - public static final String COL_FILE_NAME = "fileName"; - public static final String COL_PIN_LIFETIME = "pinLifetime"; - public static final String COL_STATUS = "status"; - public static final String COL_USER_ID = "userID"; - public static final String COL_VO_NAME = "voName"; - public static final String COL_DATE = "timeStamp"; - public static final String COL_RETRY_ATTEMPT = "retryAttempt"; - public static final String COL_DEFERRED_STARTTIME = "deferredStartTime"; - public static final String COL_GROUP_TASK_ID = "groupTaskId"; - public static final String COL_IN_PROGRESS_DATE = "inProgressTime"; - public static final String COL_FINAL_STATUS_DATE = "finalStatusTime"; - - private static final String QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS; - private static final String QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS; - - static { - - QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS = - "DELETE FROM tape_recall WHERE status<>1 AND status<>2 " - + "AND timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? SECOND) " - + "LIMIT ?"; - - QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS = - "DELETE FROM tape_recall WHERE status<>1 AND status<>2 " - + "AND timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? 
SECOND) "; - } - - public TapeRecallMySQLHelper(String dbmsVendor) { - - super(dbmsVendor); - } - - /** - * Verifies if the given string is the name of one of the timestamp columns - * - * @param columnName - * @return - */ - private static boolean validTimestampColumnName(String columnName) { - - return COL_DATE.equals(columnName) - || COL_IN_PROGRESS_DATE.equals(columnName) - || COL_FINAL_STATUS_DATE.equals(columnName); - } - - /** - * @param conn - * @param recallTask - * @return a PreparedStatement for the requested query - */ - public PreparedStatement getQueryInsertTask(Connection conn, - TapeRecallTO recallTask) { - - if (recallTask == null) { - return null; - } - - String query = "INSERT INTO " + TABLE_NAME + " (" + COL_TASK_ID + ", " - + COL_REQUEST_TOKEN + ", " + COL_REQUEST_TYPE + ", " + COL_FILE_NAME - + ", " + COL_PIN_LIFETIME + ", " + COL_STATUS + ", " + COL_VO_NAME + ", " - + COL_USER_ID + ", " + COL_RETRY_ATTEMPT + ", " + COL_DEFERRED_STARTTIME - + ", " + COL_DATE + ", " + COL_GROUP_TASK_ID - + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; - - try { - PreparedStatement prepStat = conn.prepareStatement(query); - - int idx = 1; - prepStat.setString(idx++, recallTask.getTaskId().toString()); - prepStat.setString(idx++, recallTask.getRequestToken().getValue()); - prepStat.setString(idx++, recallTask.getRequestType().name()); - prepStat.setString(idx++, recallTask.getFileName()); - prepStat.setInt(idx++, recallTask.getPinLifetime()); - prepStat.setInt(idx++, recallTask.getStatusId()); - - prepStat.setString(idx++, recallTask.getVoName()); - prepStat.setString(idx++, recallTask.getUserID()); - prepStat.setInt(idx++, recallTask.getRetryAttempt()); - prepStat.setTimestamp(idx++, new java.sql.Timestamp(recallTask - .getDeferredRecallInstant().getTime())); - prepStat.setTimestamp(idx++, new java.sql.Timestamp(recallTask - .getInsertionInstant().getTime())); - prepStat.setString(idx++, recallTask.getGroupTaskId().toString()); - return prepStat; - - } catch 
(SQLException e) { - return null; - } - } - - /** - * @param taskId - * @param requestToken - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetTask(Connection conn, UUID taskId, - String requestToken) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?" - + " AND " + COL_REQUEST_TOKEN + "=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, taskId.toString()); - preparedStatement.setString(2, requestToken); - - return preparedStatement; - } - - /** - * @param groupTaskId - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetGroupTasks(Connection conn, - UUID groupTaskId) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_GROUP_TASK_ID + "=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, groupTaskId.toString()); - - return preparedStatement; - } - - /** - * @param taskId - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetGroupTaskIds(Connection conn, UUID taskId) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT DISTINCT " + COL_GROUP_TASK_ID + " , " + COL_STATUS + " , " - + COL_IN_PROGRESS_DATE + " , " + COL_FINAL_STATUS_DATE + " FROM " - + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, taskId.toString()); - - return preparedStatement; - } - - /** - * @param taskId - * @param statuses - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetGroupTaskIds(Connection conn, - UUID taskId, int[] statuses) throws 
SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT DISTINCT " + COL_GROUP_TASK_ID + " , " + COL_STATUS + " , " - + COL_IN_PROGRESS_DATE + " , " + COL_FINAL_STATUS_DATE + " FROM " - + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?" + " AND " + COL_STATUS - + " IN ( "; - - boolean first = true; - for (int status : statuses) { - if (first) { - first = false; - } else { - str += " , "; - } - str += status; - } - str += " )"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, taskId.toString()); - - return preparedStatement; - } - - /** - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryNumberQueued(Connection conn) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; + private static final String TABLE_NAME = "tape_recall"; + + // primary key COL_TASK_ID + COL_REQUEST_TOKEN + public static final String COL_TASK_ID = "taskId"; + public static final String COL_REQUEST_TOKEN = "requestToken"; + public static final String COL_REQUEST_TYPE = "requestType"; + public static final String COL_FILE_NAME = "fileName"; + public static final String COL_PIN_LIFETIME = "pinLifetime"; + public static final String COL_STATUS = "status"; + public static final String COL_USER_ID = "userID"; + public static final String COL_VO_NAME = "voName"; + public static final String COL_DATE = "timeStamp"; + public static final String COL_RETRY_ATTEMPT = "retryAttempt"; + public static final String COL_DEFERRED_STARTTIME = "deferredStartTime"; + public static final String COL_GROUP_TASK_ID = "groupTaskId"; + public static final String COL_IN_PROGRESS_DATE = "inProgressTime"; + public static final String COL_FINAL_STATUS_DATE = "finalStatusTime"; + + private static final String QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS; + private static final String QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS; + + static { + + 
QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS = + "DELETE FROM tape_recall WHERE status<>1 AND status<>2 " + + "AND timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? SECOND) " + "LIMIT ?"; + + QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS = + "DELETE FROM tape_recall WHERE status<>1 AND status<>2 " + + "AND timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? SECOND) "; + } + + /** + * Verifies if the given string is the name of one of the timestamp columns + * + * @param columnName + * @return + */ + private static boolean validTimestampColumnName(String columnName) { + + return COL_DATE.equals(columnName) || COL_IN_PROGRESS_DATE.equals(columnName) + || COL_FINAL_STATUS_DATE.equals(columnName); + } + + /** + * @param conn + * @param recallTask + * @return a PreparedStatement for the requested query + */ + public PreparedStatement getQueryInsertTask(Connection conn, TapeRecallTO recallTask) { + + if (recallTask == null) { + return null; + } + + String query = "INSERT INTO " + TABLE_NAME + " (" + COL_TASK_ID + ", " + COL_REQUEST_TOKEN + + ", " + COL_REQUEST_TYPE + ", " + COL_FILE_NAME + ", " + COL_PIN_LIFETIME + ", " + + COL_STATUS + ", " + COL_VO_NAME + ", " + COL_USER_ID + ", " + COL_RETRY_ATTEMPT + ", " + + COL_DEFERRED_STARTTIME + ", " + COL_DATE + ", " + COL_GROUP_TASK_ID + + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + + try { + PreparedStatement prepStat = conn.prepareStatement(query); + + int idx = 1; + prepStat.setString(idx++, recallTask.getTaskId().toString()); + prepStat.setString(idx++, recallTask.getRequestToken().getValue()); + prepStat.setString(idx++, recallTask.getRequestType().name()); + prepStat.setString(idx++, recallTask.getFileName()); + prepStat.setInt(idx++, recallTask.getPinLifetime()); + prepStat.setInt(idx++, recallTask.getStatusId()); + + prepStat.setString(idx++, recallTask.getVoName()); + prepStat.setString(idx++, recallTask.getUserID()); + prepStat.setInt(idx++, recallTask.getRetryAttempt()); + prepStat.setTimestamp(idx++, + new 
java.sql.Timestamp(recallTask.getDeferredRecallInstant().getTime())); + prepStat.setTimestamp(idx++, + new java.sql.Timestamp(recallTask.getInsertionInstant().getTime())); + prepStat.setString(idx++, recallTask.getGroupTaskId().toString()); + return prepStat; + + } catch (SQLException e) { + return null; + } + } + + /** + * @param taskId + * @param requestToken + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryGetTask(Connection conn, UUID taskId, String requestToken) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?" + " AND " + + COL_REQUEST_TOKEN + "=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, taskId.toString()); + preparedStatement.setString(2, requestToken); + + return preparedStatement; + } + + /** + * @param groupTaskId + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryGetGroupTasks(Connection conn, UUID groupTaskId) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_GROUP_TASK_ID + "=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, groupTaskId.toString()); + + return preparedStatement; + } + + /** + * @param taskId + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryGetGroupTaskIds(Connection conn, UUID taskId) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT DISTINCT " + COL_GROUP_TASK_ID + " , " + COL_STATUS + " , " + COL_IN_PROGRESS_DATE + + " , " + COL_FINAL_STATUS_DATE + " FROM " + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, 
taskId.toString()); + + return preparedStatement; + } + + /** + * @param taskId + * @param statuses + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryGetGroupTaskIds(Connection conn, UUID taskId, int[] statuses) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT DISTINCT " + COL_GROUP_TASK_ID + " , " + COL_STATUS + " , " + COL_IN_PROGRESS_DATE + + " , " + COL_FINAL_STATUS_DATE + " FROM " + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?" + + " AND " + COL_STATUS + " IN ( "; + + boolean first = true; + for (int status : statuses) { + if (first) { + first = false; + } else { + str += " , "; + } + str += status; + } + str += " )"; - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, taskId.toString()); - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + return preparedStatement; + } - return preparedStatement; - } - - /** - * @param voName - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryNumberQueued(Connection conn, String voName) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?" 
+ " AND " + COL_VO_NAME + "=?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - preparedStatement.setString(2, voName); - - return preparedStatement; - } - - /** - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryReadyForTakeOver(Connection conn) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?" + " AND " + COL_DEFERRED_STARTTIME - + "<=NOW()"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - - return preparedStatement; - } - - /** - * @param voName - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryReadyForTakeOver(Connection conn, - String voName) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?" 
+ " AND " - + COL_DEFERRED_STARTTIME + "<=NOW()"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - preparedStatement.setString(2, voName); - - return preparedStatement; - } - - /** - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryNumberInProgress(Connection conn) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); - - return preparedStatement; - } - - /** - * @param voName - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryNumberInProgress(Connection conn, - String voName) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); - preparedStatement.setString(2, voName); - - return preparedStatement; - } - - /** - * @param numberOfTasks - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetTakeoverTasksWithDoubles(Connection conn, - int numberOfTasks) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" 
- + " AND " + COL_DEFERRED_STARTTIME + "<=NOW() ORDER BY " - + COL_DEFERRED_STARTTIME + " LIMIT ?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - preparedStatement.setInt(2, numberOfTasks); - - return preparedStatement; - } - - /** - * @param numberOfTasks - * @param voName - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetTakeoverTasksWithDoubles(Connection conn, - int numberOfTasks, String voName) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" - + " AND " + COL_VO_NAME + "=?" + " AND " + COL_DEFERRED_STARTTIME - + "<=NOW() ORDER BY " + COL_DEFERRED_STARTTIME + " LIMIT ?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - preparedStatement.setString(2, voName); - preparedStatement.setInt(3, numberOfTasks); - - return preparedStatement; - } - - /** - * Creates the query string for looking up all the information related to in - * progress tasks in the recall database. - * - * @param numberOfTasks - * the maximum number of task returned - * @return the query string - * @throws SQLException - */ - public PreparedStatement getQueryGetAllTasksInProgress(Connection conn, - int numberOfTasks) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" 
- + " ORDER BY " + COL_IN_PROGRESS_DATE + " ASC LIMIT ?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); - preparedStatement.setInt(2, numberOfTasks); - - return preparedStatement; - - } - - /** - * @param taskList - * @param date - * @param j - * @return - * @throws SQLException - */ - public PreparedStatement getQueryUpdateTasksStatus(Connection conn, - List taskList, int statusId, String timestampColumn, - Date timestamp) throws IllegalArgumentException, SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - if (taskList.size() == 0) { - return null; - } - if (validTimestampColumnName(timestampColumn)) { - str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " , " - + timestampColumn + "=?" + " WHERE " + COL_GROUP_TASK_ID + "=?"; - - for (int i = 1; i < taskList.size(); i++) { - str += " OR " + COL_GROUP_TASK_ID + "=?"; - } - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setInt(1, statusId); - preparedStatement.setTimestamp(2, - new java.sql.Timestamp(timestamp.getTime())); - preparedStatement.setString(3, taskList.get(0).getGroupTaskId() - .toString()); - - int idx = 4; - for (int i = 1; i < taskList.size(); i++) { - preparedStatement.setString(idx, taskList.get(i).getGroupTaskId() - .toString()); - idx++; - } - } else { - throw new IllegalArgumentException( - "Unable to update row status and timestamp. 
The priovided timestamp column \'" - + timestampColumn + "\' is not valid"); - } - - return preparedStatement; - } - - /** - * @param groupTaskId - * @param status - * @param timestampColumn - * @param timestamp - * @return - * @throws IllegalArgumentException - * @throws SQLException - */ - public PreparedStatement getQueryUpdateGroupTaskStatus(Connection conn, - UUID groupTaskId, int status, String timestampColumn, Date timestamp) - throws IllegalArgumentException, SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - if (validTimestampColumnName(timestampColumn)) { - str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " , " - + timestampColumn + "=?" + " WHERE " + COL_GROUP_TASK_ID + "=?" - + " AND " + COL_STATUS + "!=?"; - - } else { - throw new IllegalArgumentException( - "Unable to update row status and timestamp. The priovided timestamp column \'" - + timestampColumn + "\' is not valid"); - } - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setInt(1, status); - preparedStatement.setTimestamp(2, - new java.sql.Timestamp(timestamp.getTime())); - preparedStatement.setString(3, groupTaskId.toString()); - preparedStatement.setInt(4, status); - - return preparedStatement; - - } - - /** - * @param groupTaskId - * @param status - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQuerySetGroupTaskStatus(Connection conn, - UUID groupTaskId, int status) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " WHERE " - + COL_GROUP_TASK_ID + "=?" 
+ " AND " + COL_STATUS + "!=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setInt(1, status); - preparedStatement.setString(2, groupTaskId.toString()); - preparedStatement.setInt(3, status); - - return preparedStatement; - } - - /** - * @param groupTaskId - * @param value - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQuerySetGroupTaskRetryValue(Connection conn, - UUID groupTaskId, int value) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "UPDATE " + TABLE_NAME + " SET " + COL_RETRY_ATTEMPT + "=?" - + " WHERE " + COL_GROUP_TASK_ID + "=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setInt(1, value); - preparedStatement.setString(2, groupTaskId.toString()); - - return preparedStatement; - } - - /** - * @param con - * @param expirationTime - * @return the requested query as @PreparedStatement - * @throws SQLException - */ - public PreparedStatement getQueryDeleteCompletedTasks(Connection con, long expirationTime) - throws SQLException { - - PreparedStatement ps = con.prepareStatement(QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS); - ps.setLong(1, expirationTime); - - return ps; - } - - /** - * @param con - * @param expirationTime - * @param maxNumTasks - * @return the requested query as @PreparedStatement - * @throws SQLException - */ - public PreparedStatement getQueryDeleteCompletedTasks(Connection con, long expirationTime, - int maxNumTasks) throws SQLException { - - PreparedStatement ps = con.prepareStatement(QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS); + /** + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryNumberQueued(Connection conn) throws SQLException { - ps.setLong(1, expirationTime); - ps.setInt(2, maxNumTasks); + String str = null; + PreparedStatement preparedStatement = null; - return ps; - } + str = "SELECT COUNT(DISTINCT " + 
COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + + return preparedStatement; + } + + /** + * @param voName + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryNumberQueued(Connection conn, String voName) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + preparedStatement.setString(2, voName); + + return preparedStatement; + } + + /** + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryReadyForTakeOver(Connection conn) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?" + " AND " + COL_DEFERRED_STARTTIME + "<=NOW()"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + + return preparedStatement; + } + + /** + * @param voName + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryReadyForTakeOver(Connection conn, String voName) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?" 
+ " AND " + COL_DEFERRED_STARTTIME + + "<=NOW()"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + preparedStatement.setString(2, voName); + + return preparedStatement; + } + + /** + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryNumberInProgress(Connection conn) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); + + return preparedStatement; + } + + /** + * @param voName + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryNumberInProgress(Connection conn, String voName) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); + preparedStatement.setString(2, voName); + + return preparedStatement; + } + + /** + * @param numberOfTasks + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryGetTakeoverTasksWithDoubles(Connection conn, int numberOfTasks) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" 
+ " AND " + + COL_DEFERRED_STARTTIME + "<=NOW() ORDER BY " + COL_DEFERRED_STARTTIME + " LIMIT ?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + preparedStatement.setInt(2, numberOfTasks); + + return preparedStatement; + } + + /** + * @param numberOfTasks + * @param voName + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryGetTakeoverTasksWithDoubles(Connection conn, int numberOfTasks, + String voName) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" + " AND " + COL_VO_NAME + + "=?" + " AND " + COL_DEFERRED_STARTTIME + "<=NOW() ORDER BY " + COL_DEFERRED_STARTTIME + + " LIMIT ?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + preparedStatement.setString(2, voName); + preparedStatement.setInt(3, numberOfTasks); + + return preparedStatement; + } + + /** + * Creates the query string for looking up all the information related to in progress tasks in the + * recall database. + * + * @param numberOfTasks the maximum number of task returned + * @return the query string + * @throws SQLException + */ + public PreparedStatement getQueryGetAllTasksInProgress(Connection conn, int numberOfTasks) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" 
+ " ORDER BY " + + COL_IN_PROGRESS_DATE + " ASC LIMIT ?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); + preparedStatement.setInt(2, numberOfTasks); + + return preparedStatement; + + } + + /** + * @param taskList + * @param date + * @param j + * @return + * @throws SQLException + */ + public PreparedStatement getQueryUpdateTasksStatus(Connection conn, List taskList, + int statusId, String timestampColumn, Date timestamp) + throws IllegalArgumentException, SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + if (taskList.size() == 0) { + return null; + } + if (validTimestampColumnName(timestampColumn)) { + str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " , " + timestampColumn + "=?" + + " WHERE " + COL_GROUP_TASK_ID + "=?"; + + for (int i = 1; i < taskList.size(); i++) { + str += " OR " + COL_GROUP_TASK_ID + "=?"; + } + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setInt(1, statusId); + preparedStatement.setTimestamp(2, new java.sql.Timestamp(timestamp.getTime())); + preparedStatement.setString(3, taskList.get(0).getGroupTaskId().toString()); + + int idx = 4; + for (int i = 1; i < taskList.size(); i++) { + preparedStatement.setString(idx, taskList.get(i).getGroupTaskId().toString()); + idx++; + } + } else { + throw new IllegalArgumentException( + "Unable to update row status and timestamp. 
The priovided timestamp column \'" + + timestampColumn + "\' is not valid"); + } + + return preparedStatement; + } + + /** + * @param groupTaskId + * @param status + * @param timestampColumn + * @param timestamp + * @return + * @throws IllegalArgumentException + * @throws SQLException + */ + public PreparedStatement getQueryUpdateGroupTaskStatus(Connection conn, UUID groupTaskId, + int status, String timestampColumn, Date timestamp) + throws IllegalArgumentException, SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + if (validTimestampColumnName(timestampColumn)) { + str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " , " + timestampColumn + "=?" + + " WHERE " + COL_GROUP_TASK_ID + "=?" + " AND " + COL_STATUS + "!=?"; + + } else { + throw new IllegalArgumentException( + "Unable to update row status and timestamp. The priovided timestamp column \'" + + timestampColumn + "\' is not valid"); + } + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setInt(1, status); + preparedStatement.setTimestamp(2, new java.sql.Timestamp(timestamp.getTime())); + preparedStatement.setString(3, groupTaskId.toString()); + preparedStatement.setInt(4, status); + + return preparedStatement; + + } + + /** + * @param groupTaskId + * @param status + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQuerySetGroupTaskStatus(Connection conn, UUID groupTaskId, int status) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " WHERE " + COL_GROUP_TASK_ID + + "=?" 
+ " AND " + COL_STATUS + "!=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setInt(1, status); + preparedStatement.setString(2, groupTaskId.toString()); + preparedStatement.setInt(3, status); + + return preparedStatement; + } + + /** + * @param groupTaskId + * @param value + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQuerySetGroupTaskRetryValue(Connection conn, UUID groupTaskId, + int value) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE " + TABLE_NAME + " SET " + COL_RETRY_ATTEMPT + "=?" + " WHERE " + + COL_GROUP_TASK_ID + "=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setInt(1, value); + preparedStatement.setString(2, groupTaskId.toString()); + + return preparedStatement; + } + + /** + * @param con + * @param expirationTime + * @return the requested query as @PreparedStatement + * @throws SQLException + */ + public PreparedStatement getQueryDeleteCompletedTasks(Connection con, long expirationTime) + throws SQLException { + + PreparedStatement ps = con.prepareStatement(QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS); + ps.setLong(1, expirationTime); + + return ps; + } + + /** + * @param con + * @param expirationTime + * @param maxNumTasks + * @return the requested query as @PreparedStatement + * @throws SQLException + */ + public PreparedStatement getQueryDeleteCompletedTasks(Connection con, long expirationTime, + int maxNumTasks) throws SQLException { + + PreparedStatement ps = con.prepareStatement(QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS); + + ps.setLong(1, expirationTime); + ps.setInt(2, maxNumTasks); + + return ps; + } } diff --git a/src/main/java/it/grid/storm/rest/RestServer.java b/src/main/java/it/grid/storm/rest/RestServer.java index bc207fff6..feb2f75e1 100644 --- a/src/main/java/it/grid/storm/rest/RestServer.java +++ b/src/main/java/it/grid/storm/rest/RestServer.java @@ -47,11 +47,10 
@@ import it.grid.storm.info.remote.resources.SpaceStatusResource; import it.grid.storm.metrics.NamedInstrumentedSelectChannelConnector; import it.grid.storm.metrics.NamedInstrumentedThreadPool; -import it.grid.storm.namespace.remote.resource.VirtualFSResource; -import it.grid.storm.namespace.remote.resource.VirtualFSResourceCompat_1_0; -import it.grid.storm.namespace.remote.resource.VirtualFSResourceCompat_1_1; -import it.grid.storm.namespace.remote.resource.VirtualFSResourceCompat_1_2; import it.grid.storm.rest.auth.RestTokenFilter; +import it.grid.storm.rest.info.endpoint.EndpointResource; +import it.grid.storm.rest.info.namespace.NamespaceInfoEndpoint; +import it.grid.storm.rest.info.storageareas.StorageAreasResource; import it.grid.storm.rest.metadata.Metadata; import it.grid.storm.tape.recalltable.providers.TapeRecallTOListMessageBodyWriter; import it.grid.storm.tape.recalltable.resources.TaskResource; @@ -66,9 +65,6 @@ */ public class RestServer { - public static final int DEFAULT_MAX_THREAD_NUM = 100; - public static final int DEFAULT_MAX_QUEUE_SIZE = 1000; - private static final Logger LOG = LoggerFactory.getLogger(RestServer.class); private final Server server; @@ -115,15 +111,14 @@ private void configure() { resourceConfig.register(TasksResource.class); resourceConfig.register(TasksCardinality.class); resourceConfig.register(TapeRecallTOListMessageBodyWriter.class); - resourceConfig.register(AuthorizationResource.class); - resourceConfig.register(AuthorizationResourceCompat_1_0.class); - resourceConfig.register(VirtualFSResource.class); - resourceConfig.register(VirtualFSResourceCompat_1_0.class); - resourceConfig.register(VirtualFSResourceCompat_1_1.class); - resourceConfig.register(VirtualFSResourceCompat_1_2.class); - resourceConfig.register(StormEAResource.class); +// resourceConfig.register(AuthorizationResource.class); +// resourceConfig.register(AuthorizationResourceCompat_1_0.class); + resourceConfig.register(StorageAreasResource.class); + 
resourceConfig.register(NamespaceInfoEndpoint.class); + resourceConfig.register(EndpointResource.class); +// resourceConfig.register(StormEAResource.class); resourceConfig.register(Metadata.class); - resourceConfig.register(Ping.class); +// resourceConfig.register(Ping.class); resourceConfig.register(SpaceStatusResource.class); /* JSON POJO support: */ resourceConfig.register(JacksonFeature.class); diff --git a/src/main/java/it/grid/storm/rest/info/endpoint/EndpointResource.java b/src/main/java/it/grid/storm/rest/info/endpoint/EndpointResource.java new file mode 100644 index 000000000..719ae8cf6 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/endpoint/EndpointResource.java @@ -0,0 +1,72 @@ +package it.grid.storm.rest.info.endpoint; + +import java.util.Map; +import java.util.Set; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +import it.grid.storm.config.Configuration; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.model.Authority; +import it.grid.storm.namespace.model.Protocol; +import it.grid.storm.rest.info.endpoint.model.EndpointInfo; +import it.grid.storm.rest.info.storageareas.model.SAInfo; + +@Path("/info") +public class EndpointResource { + + private static final Logger log = LoggerFactory.getLogger(EndpointResource.class); + + private EndpointInfo endpoint; + + public EndpointResource() { + + this(Configuration.getInstance(), Namespace.getInstance()); + } + + public EndpointResource(Configuration config, Namespace ns) { + + endpoint = new EndpointInfo(); + endpoint.setSiteName(config.getSiteName()); + endpoint.setQualityLevel(config.getQualityLevel()); + String version = getClass().getPackage().getImplementationVersion(); + endpoint.setVersion(version != 
null ? version : "unknown"); + endpoint.setVos(ns.getSupportedVOs()); + endpoint.setSrmEndpoints(config.getSrmEndpoints()); + endpoint.setGridftpEndpoints(ns.getManagedEndpoints(Protocol.GSIFTP)); + Set davEndpoints = Sets.newHashSet(); + davEndpoints.addAll(ns.getManagedEndpoints(Protocol.HTTPS)); + davEndpoints.addAll(ns.getManagedEndpoints(Protocol.HTTP)); + endpoint.setDavEndpoints(davEndpoints); + Set xrootEndpoints = Sets.newHashSet(); + xrootEndpoints.addAll(ns.getManagedEndpoints(Protocol.XROOT)); + xrootEndpoints.addAll(ns.getManagedEndpoints(Protocol.ROOT)); + endpoint.setXrootEndpoints(xrootEndpoints); + + Map sas = Maps.newHashMap(); + ns.getAllDefinedVFS().forEach(vfs -> { + try { + sas.put(vfs.getAliasName(), SAInfo.buildFromVFS(vfs)); + } catch (NamespaceException e) { + log.error(e.getMessage(), e); + } + }); + endpoint.setStorageAreas(sas); + } + + @GET + @Produces(MediaType.APPLICATION_JSON) + public EndpointInfo getEndpointInfo() { + return endpoint; + } +} diff --git a/src/main/java/it/grid/storm/rest/info/endpoint/model/EndpointInfo.java b/src/main/java/it/grid/storm/rest/info/endpoint/model/EndpointInfo.java new file mode 100644 index 000000000..0e025354a --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/endpoint/model/EndpointInfo.java @@ -0,0 +1,119 @@ +package it.grid.storm.rest.info.endpoint.model; + +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +import it.grid.storm.config.model.v2.SrmEndpoint; +import it.grid.storm.namespace.model.Authority; +import it.grid.storm.config.ConfigurationDefaults; +import it.grid.storm.config.model.v2.QualityLevel; +import it.grid.storm.rest.info.storageareas.model.SAInfo; + +public class EndpointInfo { + + private String siteName; + private QualityLevel qualityLevel; + private String version; + private Set vos; + private List srmEndpoints; + private Set 
gridftpEndpoints; + private Set davEndpoints; + private Set xrootEndpoints; + private Map storageAreas; + + public EndpointInfo() { + siteName = ConfigurationDefaults.DEFAULT_SITENAME; + qualityLevel = ConfigurationDefaults.DEFAULT_QUALITY_LEVEL; + version = "unknown"; + vos = Sets.newHashSet(); + srmEndpoints = Lists.newArrayList(); + gridftpEndpoints = Sets.newHashSet(); + davEndpoints = Sets.newHashSet(); + xrootEndpoints = Sets.newHashSet(); + storageAreas = Maps.newHashMap(); + } + + public String getSiteName() { + return siteName; + } + + public void setSiteName(String siteName) { + this.siteName = siteName; + } + + public QualityLevel getQualityLevel() { + return qualityLevel; + } + + public void setQualityLevel(QualityLevel qualityLevel) { + this.qualityLevel = qualityLevel; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } + + public Map getStorageAreas() { + return storageAreas; + } + + public void setStorageAreas(Map storageAreas) { + this.storageAreas.clear(); + this.storageAreas.putAll(storageAreas); + } + + public List getSrmEndpoints() { + return srmEndpoints; + } + + public void setSrmEndpoints(List srmEndpoints) { + this.srmEndpoints.clear(); + this.srmEndpoints.addAll(srmEndpoints); + } + + public Set getGridftpEndpoints() { + return gridftpEndpoints; + } + + public void setGridftpEndpoints(Set gridftpEndpoints) { + this.gridftpEndpoints.clear(); + this.gridftpEndpoints.addAll(gridftpEndpoints); + } + + public Set getDavEndpoints() { + return davEndpoints; + } + + public void setDavEndpoints(Set davEndpoints) { + this.davEndpoints.clear(); + this.davEndpoints.addAll(davEndpoints); + } + + public Set getXrootEndpoints() { + return xrootEndpoints; + } + + public void setXrootEndpoints(Set xrootEndpoints) { + this.xrootEndpoints.clear(); + this.xrootEndpoints.addAll(xrootEndpoints); + } + + public Set getVos() { + return vos; + } + + public void setVos(Set vos) { + 
this.vos.clear(); + this.vos.addAll(vos); + } + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/NamespaceInfoEndpoint.java b/src/main/java/it/grid/storm/rest/info/namespace/NamespaceInfoEndpoint.java new file mode 100644 index 000000000..45b91c488 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/NamespaceInfoEndpoint.java @@ -0,0 +1,44 @@ +package it.grid.storm.rest.info.namespace; + +import static javax.ws.rs.core.MediaType.APPLICATION_JSON; + +import java.io.File; +import java.io.IOException; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; + +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.databind.JsonMappingException; +import com.fasterxml.jackson.dataformat.xml.XmlMapper; + +import it.grid.storm.config.Configuration; +import it.grid.storm.rest.info.namespace.model.Namespace; + +@Path("/info/namespace") +public class NamespaceInfoEndpoint { + + private Namespace info; + + public NamespaceInfoEndpoint() throws JsonParseException, JsonMappingException, IOException { + + buildNamespace(); + } + + private void buildNamespace() throws JsonParseException, JsonMappingException, IOException { + + // read XML from namespace.xml and load it into Namespace.class + File namespaceFile = new File(Configuration.getInstance().getNamespaceConfigFilePath()); + XmlMapper xmlMapper = new XmlMapper(); + info = xmlMapper.readValue(namespaceFile, Namespace.class); + } + + @GET + @Produces(APPLICATION_JSON) + public Namespace getNamespace() { + + return info; + } + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/AccessLatency.java b/src/main/java/it/grid/storm/rest/info/namespace/model/AccessLatency.java new file mode 100644 index 000000000..3497d527b --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/AccessLatency.java @@ -0,0 +1,6 @@ +package it.grid.storm.rest.info.namespace.model; + +public enum AccessLatency { + + online, nearline, 
offline +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/AclEntry.java b/src/main/java/it/grid/storm/rest/info/namespace/model/AclEntry.java new file mode 100644 index 000000000..6a939f3c4 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/AclEntry.java @@ -0,0 +1,34 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonInclude.Include; + +@JsonInclude(Include.NON_NULL) +public class AclEntry { + + private String groupName; + private PermissionType permissions; + + @JsonProperty("groupName") + public String getGroupName() { + return groupName; + } + + @JsonProperty("groupName") + public void setGroupName(String groupName) { + this.groupName = groupName; + } + + @JsonProperty("permissions") + public PermissionType getPermissions() { + return permissions; + } + + @JsonProperty("permissions") + public void setPermissions(PermissionType permissions) { + this.permissions = permissions; + } + + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/AclMode.java b/src/main/java/it/grid/storm/rest/info/namespace/model/AclMode.java new file mode 100644 index 000000000..de77df459 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/AclMode.java @@ -0,0 +1,6 @@ +package it.grid.storm.rest.info.namespace.model; + +public enum AclMode { + + AoT, JiT +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/ApproachableRule.java b/src/main/java/it/grid/storm/rest/info/namespace/model/ApproachableRule.java new file mode 100644 index 000000000..ae8e13b2b --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/ApproachableRule.java @@ -0,0 +1,56 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import 
com.fasterxml.jackson.annotation.JsonInclude.Include; + +@JsonInclude(Include.NON_NULL) +public class ApproachableRule { + + private String name; + private Subject subjects; + private String approachableFs; + private Boolean anonymousHttpRead; + + @JsonProperty("name") + public String getName() { + return name; + } + + @JsonProperty("name") + public void setName(String name) { + this.name = name; + } + + @JsonProperty("subjects") + public Subject getSubjects() { + return subjects; + } + + @JsonProperty("subjects") + public void setSubjects(Subject subjects) { + this.subjects = subjects; + } + + @JsonProperty("approachableFs") + public String getApproachableFs() { + return approachableFs; + } + + @JsonProperty("approachable-fs") + public void setApproachableFs(String approachableFs) { + this.approachableFs = approachableFs; + } + + @JsonProperty("anonymousHttpRead") + public Boolean getAnonymousHttpRead() { + return anonymousHttpRead; + } + + @JsonProperty("anonymous-http-read") + public void setAnonymousHttpRead(Boolean anonymousHttpRead) { + this.anonymousHttpRead = anonymousHttpRead; + } + + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/BalanceStrategy.java b/src/main/java/it/grid/storm/rest/info/namespace/model/BalanceStrategy.java new file mode 100644 index 000000000..35e31311a --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/BalanceStrategy.java @@ -0,0 +1,19 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonValue; + +public enum BalanceStrategy { + + ROUND_ROBIN("round-robin"), RANDOM("random"), WEIGHT("weight"), SMART_ROUND_ROBIN("smart-rr"); + + private String value; + + private BalanceStrategy(String value) { + this.value = value; + } + + @JsonValue + public String getValue() { + return value; + } +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/Capabilities.java b/src/main/java/it/grid/storm/rest/info/namespace/model/Capabilities.java 
new file mode 100644 index 000000000..a988136ab --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/Capabilities.java @@ -0,0 +1,69 @@ +package it.grid.storm.rest.info.namespace.model; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonInclude.Include; + +@JsonInclude(Include.NON_NULL) +public class Capabilities { + + private AclMode aclMode; + private List defaultAcl; + private List quota; + private List transProt; + private Pool pool; + + @JsonProperty("aclMode") + public AclMode getAclMode() { + return aclMode; + } + + @JsonProperty("aclMode") + public void setAclMode(AclMode aclMode) { + this.aclMode = aclMode; + } + + @JsonProperty("defaultAcl") + public List getDefaultAcl() { + return defaultAcl; + } + + @JsonProperty("default-acl") + public void setDefaultAcl(List defaultAcl) { + this.defaultAcl = defaultAcl; + } + + @JsonProperty("quota") + public List getQuota() { + return quota; + } + + @JsonProperty("quota") + public void setQuota(List quota) { + this.quota = quota; + } + + @JsonProperty("transferProtocols") + public List getTransProt() { + return transProt; + } + + @JsonProperty("trans-prot") + public void setTransProt(List transProt) { + this.transProt = transProt; + } + + @JsonProperty("pool") + public Pool getPool() { + return pool; + } + + @JsonProperty("pool") + public void setPool(Pool pool) { + this.pool = pool; + } + + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/DefaultAcl.java b/src/main/java/it/grid/storm/rest/info/namespace/model/DefaultAcl.java new file mode 100644 index 000000000..b90e9b04e --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/DefaultAcl.java @@ -0,0 +1,25 @@ +package it.grid.storm.rest.info.namespace.model; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; +import 
com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonInclude.Include; + +@JsonInclude(Include.NON_NULL) +public class DefaultAcl { + + private List aclEntry; + + @JsonProperty("aclEntries") + public List getAclEntry() { + return aclEntry; + } + + @JsonProperty("acl-entry") + public void setAclEntry(List aclEntry) { + this.aclEntry = aclEntry; + } + + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/ExpirationMode.java b/src/main/java/it/grid/storm/rest/info/namespace/model/ExpirationMode.java new file mode 100644 index 000000000..1602f000a --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/ExpirationMode.java @@ -0,0 +1,6 @@ +package it.grid.storm.rest.info.namespace.model; + +public enum ExpirationMode { + + neverExpire, warnWhenExpire, releaseWhenExpire +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/Filesystem.java b/src/main/java/it/grid/storm/rest/info/namespace/model/Filesystem.java new file mode 100644 index 000000000..1a2e46d68 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/Filesystem.java @@ -0,0 +1,122 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonInclude.Include; + +@JsonInclude(Include.NON_NULL) +public class Filesystem { + + private String name; + private FilesystemType fsType; + private String spaceTokenDescription; + private StorageClass storageClass; + private String root; + private FilesystemDriver filesystemDriver; + private SpacesystemDriver spacesystemDriver; + private Object storageAreaAuthz; + private Properties properties; + private Capabilities capabilities; + + @JsonProperty("name") + public String getName() { + return name; + } + + @JsonProperty("name") + public void setName(String name) { + this.name = name; + } + + @JsonProperty("fsType") + public 
FilesystemType getFsType() { + return fsType; + } + + @JsonProperty("fs_type") + public void setFsType(FilesystemType fsType) { + this.fsType = fsType; + } + + @JsonProperty("spaceToken") + public String getSpaceTokenDescription() { + return spaceTokenDescription; + } + + @JsonProperty("space-token-description") + public void setSpaceTokenDescription(String spaceTokenDescription) { + this.spaceTokenDescription = spaceTokenDescription; + } + + @JsonProperty("storageClass") + public StorageClass getStorageClass() { + return storageClass; + } + + @JsonProperty("storage-class") + public void setStorageClass(StorageClass storageClass) { + this.storageClass = storageClass; + } + + @JsonProperty("rootPath") + public String getRoot() { + return root; + } + + @JsonProperty("root") + public void setRoot(String root) { + this.root = root; + } + + @JsonProperty("filesystemDriver") + public FilesystemDriver getFilesystemDriver() { + return filesystemDriver; + } + + @JsonProperty("filesystem-driver") + public void setFilesystemDriver(FilesystemDriver filesystemDriver) { + this.filesystemDriver = filesystemDriver; + } + + @JsonProperty("spacesystemDriver") + public SpacesystemDriver getSpacesystemDriver() { + return spacesystemDriver; + } + + @JsonProperty("spacesystem-driver") + public void setSpacesystemDriver(SpacesystemDriver spacesystemDriver) { + this.spacesystemDriver = spacesystemDriver; + } + + @JsonProperty("storageAreaAuthz") + public Object getStorageAreaAuthz() { + return storageAreaAuthz; + } + + @JsonProperty("storage-area-authz") + public void setStorageAreaAuthz(Object storageAreaAuthz) { + this.storageAreaAuthz = storageAreaAuthz; + } + + @JsonProperty("properties") + public Properties getProperties() { + return properties; + } + + @JsonProperty("properties") + public void setProperties(Properties properties) { + this.properties = properties; + } + + @JsonProperty("capabilities") + public Capabilities getCapabilities() { + return capabilities; + } + + 
@JsonProperty("capabilities") + public void setCapabilities(Capabilities capabilities) { + this.capabilities = capabilities; + } + + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/FilesystemDriver.java b/src/main/java/it/grid/storm/rest/info/namespace/model/FilesystemDriver.java new file mode 100644 index 000000000..4861dff11 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/FilesystemDriver.java @@ -0,0 +1,23 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonValue; + +public enum FilesystemDriver { + + POSIX_FS("it.grid.storm.filesystem.swig.posixfs"), + GPSF("it.grid.storm.filesystem.swig.gpfs"), + GPFS23("it.grid.storm.filesystem.swig.gpfs23"), + XFS("it.grid.storm.filesystem.swig.xfs"), + TEST("it.grid.storm.filesystem.swig.test"); + + private String value; + + private FilesystemDriver(String value) { + this.value = value; + } + + @JsonValue + public String getValue() { + return value; + } +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/FilesystemType.java b/src/main/java/it/grid/storm/rest/info/namespace/model/FilesystemType.java new file mode 100644 index 000000000..8f3ab7623 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/FilesystemType.java @@ -0,0 +1,6 @@ +package it.grid.storm.rest.info.namespace.model; + +public enum FilesystemType { + + ext3, gpfs, xfs +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/MappingRule.java b/src/main/java/it/grid/storm/rest/info/namespace/model/MappingRule.java new file mode 100644 index 000000000..44910f5c6 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/MappingRule.java @@ -0,0 +1,44 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonInclude.Include; + 
+@JsonInclude(Include.NON_NULL) +public class MappingRule { + + private String name; + private String stfnRoot; + private String mappedFs; + + @JsonProperty("name") + public String getName() { + return name; + } + + @JsonProperty("name") + public void setName(String name) { + this.name = name; + } + + @JsonProperty("stfnRoot") + public String getStfnRoot() { + return stfnRoot; + } + + @JsonProperty("stfn-root") + public void setStfnRoot(String stfnRoot) { + this.stfnRoot = stfnRoot; + } + + @JsonProperty("mappedFs") + public String getMappedFs() { + return mappedFs; + } + + @JsonProperty("mapped-fs") + public void setMappedFs(String mappedFs) { + this.mappedFs = mappedFs; + } + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/Member.java b/src/main/java/it/grid/storm/rest/info/namespace/model/Member.java new file mode 100644 index 000000000..c962f2009 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/Member.java @@ -0,0 +1,35 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonInclude.Include; + +@JsonInclude(Include.NON_NULL) +public class Member { + + private Integer memberId; + private Integer weight; + + @JsonProperty("memberId") + public Integer getMemberId() { + return memberId; + } + + @JsonProperty("member-id") + public void setMemberId(Integer memberId) { + this.memberId = memberId; + } + + @JsonProperty("weight") + public Integer getWeight() { + return weight; + } + + @JsonProperty("weight") + public void setWeight(Integer weight) { + this.weight = weight; + } + + + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/Namespace.java b/src/main/java/it/grid/storm/rest/info/namespace/model/Namespace.java new file mode 100644 index 000000000..c38e7db08 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/Namespace.java @@ 
-0,0 +1,57 @@ +package it.grid.storm.rest.info.namespace.model; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties({"noNamespaceSchemaLocation"}) +public class Namespace { + + private String version; + private List filesystems; + private List mappingRules; + private List approachableRules; + + @JsonProperty("version") + public String getVersion() { + return version; + } + + @JsonProperty("version") + public void setVersion(String version) { + this.version = version; + } + + @JsonProperty("filesystems") + public List getFilesystems() { + return filesystems; + } + + @JsonProperty("filesystems") + public void setFilesystems(List filesystems) { + this.filesystems = filesystems; + } + + @JsonProperty("mappingRules") + public List getMappingRules() { + return mappingRules; + } + + @JsonProperty("mapping-rules") + public void setMappingRules(List mappingRules) { + this.mappingRules = mappingRules; + } + + @JsonProperty("approachableRules") + public List getApproachableRules() { + return approachableRules; + } + + @JsonProperty("approachable-rules") + public void setApproachableRules(List approachableRules) { + this.approachableRules = approachableRules; + } + + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/PermissionType.java b/src/main/java/it/grid/storm/rest/info/namespace/model/PermissionType.java new file mode 100644 index 000000000..d92ab41c3 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/PermissionType.java @@ -0,0 +1,6 @@ +package it.grid.storm.rest.info.namespace.model; + +public enum PermissionType { + + R, W, RW; +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/Pool.java b/src/main/java/it/grid/storm/rest/info/namespace/model/Pool.java new file mode 100644 index 000000000..395712893 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/Pool.java @@ -0,0 
+1,36 @@ +package it.grid.storm.rest.info.namespace.model; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonInclude.Include; + +@JsonInclude(Include.NON_NULL) +public class Pool { + + private BalanceStrategy balanceStrategy; + private List members; + + @JsonProperty("balanceStrategy") + public BalanceStrategy getBalanceStrategy() { + return balanceStrategy; + } + + @JsonProperty("balance-strategy") + public void setBalanceStrategy(BalanceStrategy balanceStrategy) { + this.balanceStrategy = balanceStrategy; + } + + @JsonProperty("members") + public List getMembers() { + return members; + } + + @JsonProperty("members") + public void setMembers(List members) { + this.members = members; + } + + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/Properties.java b/src/main/java/it/grid/storm/rest/info/namespace/model/Properties.java new file mode 100644 index 000000000..2202839c7 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/Properties.java @@ -0,0 +1,66 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonInclude.Include; + +@JsonInclude(Include.NON_NULL) +public class Properties { + + private RetentionPolicy retentionPolicy; + private AccessLatency accessLatency; + private ExpirationMode expirationMode; + private TotalOnlineSize totalOnlineSize; + private TotalNearLineSize totalNearlineSize; + + @JsonProperty("retentionPolicy") + public RetentionPolicy getRetentionPolicy() { + return retentionPolicy; + } + + @JsonProperty("RetentionPolicy") + public void setRetentionPolicy(RetentionPolicy retentionPolicy) { + this.retentionPolicy = retentionPolicy; + } + + @JsonProperty("accessLatency") + public AccessLatency getAccessLatency() { + return 
accessLatency; + } + + @JsonProperty("AccessLatency") + public void setAccessLatency(AccessLatency accessLatency) { + this.accessLatency = accessLatency; + } + + @JsonProperty("expirationMode") + public ExpirationMode getExpirationMode() { + return expirationMode; + } + + @JsonProperty("ExpirationMode") + public void setExpirationMode(ExpirationMode expirationMode) { + this.expirationMode = expirationMode; + } + + @JsonProperty("totalOnlineSize") + public TotalOnlineSize getTotalOnlineSize() { + return totalOnlineSize; + } + + @JsonProperty("TotalOnlineSize") + public void setTotalOnlineSize(TotalOnlineSize totalOnlineSize) { + this.totalOnlineSize = totalOnlineSize; + } + + @JsonProperty("totalNearlineSize") + public TotalNearLineSize getTotalNearlineSize() { + return totalNearlineSize; + } + + @JsonProperty("TotalNearlineSize") + public void setTotalNearlineSize(TotalNearLineSize totalNearlineSize) { + this.totalNearlineSize = totalNearlineSize; + } + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/Prot.java b/src/main/java/it/grid/storm/rest/info/namespace/model/Prot.java new file mode 100644 index 000000000..8aa30486c --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/Prot.java @@ -0,0 +1,67 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonInclude(Include.NON_NULL) +public class Prot { + + private String name; + private Integer id; + private String schema; + private String host; + private Integer port; + + @JsonProperty("name") + public String getName() { + return name; + } + + @JsonProperty("name") + public void setName(String name) { + this.name = name; + } + + @JsonProperty("id") + public Integer getId() { + return id; + } + + @JsonProperty("id") + public void setId(Integer id) { + this.id = id; + } + + @JsonProperty("schema") + 
public String getSchema() { + return schema; + } + + @JsonProperty("schema") + public void setSchema(String schema) { + this.schema = schema; + } + + @JsonProperty("host") + public String getHost() { + return host; + } + + @JsonProperty("host") + public void setHost(String host) { + this.host = host; + } + + @JsonProperty("port") + public Integer getPort() { + return port; + } + + @JsonProperty("port") + public void setPort(Integer port) { + this.port = port; + } + + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/Protocol.java b/src/main/java/it/grid/storm/rest/info/namespace/model/Protocol.java new file mode 100644 index 000000000..3fdb46569 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/Protocol.java @@ -0,0 +1,21 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonPropertyOrder; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; + +@JsonPropertyOrder({"id", "schema", "hostname", "port"}) +public class Protocol { + + @JacksonXmlProperty(isAttribute = true, localName = "name") + String name; + + @JacksonXmlProperty(localName = "id") + Integer id; + @JacksonXmlProperty(localName = "schema") + String schema; + @JacksonXmlProperty(localName = "hostname") + String hostname; + @JacksonXmlProperty(localName = "port") + Integer port; + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/Quota.java b/src/main/java/it/grid/storm/rest/info/namespace/model/Quota.java new file mode 100644 index 000000000..a8c7d98a2 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/Quota.java @@ -0,0 +1,45 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonInclude.Include; + +@JsonInclude(Include.NON_NULL) +public class Quota { + + private Boolean enabled; + private String 
device; + private Object quotaElement; + + @JsonProperty("enabled") + public Boolean getEnabled() { + return enabled; + } + + @JsonProperty("enabled") + public void setEnabled(Boolean enabled) { + this.enabled = enabled; + } + + @JsonProperty("device") + public String getDevice() { + return device; + } + + @JsonProperty("device") + public void setDevice(String device) { + this.device = device; + } + + @JsonProperty("quotaElement") + public Object getQuotaElement() { + return quotaElement; + } + + @JsonProperty("quotaElement") + public void setQuotaElement(Object quotaElement) { + this.quotaElement = quotaElement; + } + + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/RetentionPolicy.java b/src/main/java/it/grid/storm/rest/info/namespace/model/RetentionPolicy.java new file mode 100644 index 000000000..5c03fb44d --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/RetentionPolicy.java @@ -0,0 +1,5 @@ +package it.grid.storm.rest.info.namespace.model; + +public enum RetentionPolicy { + custodial, output, replica +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/SpacesystemDriver.java b/src/main/java/it/grid/storm/rest/info/namespace/model/SpacesystemDriver.java new file mode 100644 index 000000000..ae36d2914 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/SpacesystemDriver.java @@ -0,0 +1,22 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonValue; + +public enum SpacesystemDriver { + + GPFS_SPACESYSTEM("it.grid.storm.filesystem.GPFSSpaceSystem"), + MOCK_SPACESYSTEM("it.grid.storm.filesystem.MockSpaceSystem"), + XFS_SPACESYSTEM("it.grid.storm.filesystem.XFSSpaceSystem"); + + private String value; + + private SpacesystemDriver(String value) { + this.value = value; + } + + @JsonValue + public String getValue() { + return value; + } +} + diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/StorageClass.java 
b/src/main/java/it/grid/storm/rest/info/namespace/model/StorageClass.java new file mode 100644 index 000000000..1749198e3 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/StorageClass.java @@ -0,0 +1,6 @@ +package it.grid.storm.rest.info.namespace.model; + +public enum StorageClass { + + T0D0, T0D1, T1D1, T1D0 +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/Subject.java b/src/main/java/it/grid/storm/rest/info/namespace/model/Subject.java new file mode 100644 index 000000000..8c7047610 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/Subject.java @@ -0,0 +1,34 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonInclude.Include; + +@JsonInclude(Include.NON_NULL) +public class Subject { + + private String dn; + private String voName; + + @JsonProperty("dn") + public String getDn() { + return dn; + } + + @JsonProperty("dn") + public void setDn(String dn) { + this.dn = dn; + } + + @JsonProperty("voName") + public String getVoName() { + return voName; + } + + @JsonProperty("vo-name") + public void setVoName(String voName) { + this.voName = voName; + } + + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/TotalNearLineSize.java b/src/main/java/it/grid/storm/rest/info/namespace/model/TotalNearLineSize.java new file mode 100644 index 000000000..965f2eb59 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/TotalNearLineSize.java @@ -0,0 +1,34 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; + +@JsonDeserialize(using = TotalNearLineSizeDeserializer.class) 
+@JsonInclude(Include.NON_NULL) +public class TotalNearLineSize { + + private UnitType unit; + private Long value; + + @JsonProperty("unit") + public UnitType getUnit() { + return unit; + } + + @JsonProperty("unit") + public void setUnit(UnitType unit) { + this.unit = unit; + } + + @JsonProperty("value") + public Long getValue() { + return value; + } + + @JsonProperty("value") + public void setValue(Long value) { + this.value = value; + } +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/TotalNearLineSizeDeserializer.java b/src/main/java/it/grid/storm/rest/info/namespace/model/TotalNearLineSizeDeserializer.java new file mode 100644 index 000000000..3ac823426 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/TotalNearLineSizeDeserializer.java @@ -0,0 +1,44 @@ +package it.grid.storm.rest.info.namespace.model; + +import java.io.IOException; + +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.deser.std.StdDeserializer; + +public class TotalNearLineSizeDeserializer extends StdDeserializer { + + /** + * + */ + private static final long serialVersionUID = 1L; + + protected TotalNearLineSizeDeserializer() { + this(null); + } + + protected TotalNearLineSizeDeserializer(Class vc) { + super(vc); + } + + @Override + public TotalNearLineSize deserialize(JsonParser jp, DeserializationContext ctxt) + throws IOException, JsonProcessingException { + + TotalNearLineSize result = new TotalNearLineSize(); + JsonNode node = jp.getCodec().readTree(jp); + // unit is optional + JsonNode unitNode = node.get("unit"); + if (unitNode != null) { + result.setUnit(UnitType.valueOf(unitNode.asText())); + } else { + result.setUnit(UnitType.TB); + } + // value is required + result.setValue(node.get("value").asLong()); + return result; + } + +} diff --git 
a/src/main/java/it/grid/storm/rest/info/namespace/model/TotalOnlineSize.java b/src/main/java/it/grid/storm/rest/info/namespace/model/TotalOnlineSize.java new file mode 100644 index 000000000..1719a9034 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/TotalOnlineSize.java @@ -0,0 +1,47 @@ +package it.grid.storm.rest.info.namespace.model; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; + +@JsonDeserialize(using = TotalOnlineSizeDeserializer.class) +@JsonInclude(Include.NON_NULL) +public class TotalOnlineSize { + + private UnitType unit; + private Boolean limitedSize; + private Long value; + + @JsonProperty("unit") + public UnitType getUnit() { + return unit; + } + + @JsonProperty("unit") + public void setUnit(UnitType unit) { + this.unit = unit; + } + + @JsonProperty("limitedSize") + public Boolean getLimitedSize() { + return limitedSize; + } + + @JsonProperty("limited-size") + public void setLimitedSize(Boolean limitedSize) { + this.limitedSize = limitedSize; + } + + @JsonProperty("value") + public Long getValue() { + return value; + } + + @JsonProperty("value") + public void setValue(Long value) { + this.value = value; + } + + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/TotalOnlineSizeDeserializer.java b/src/main/java/it/grid/storm/rest/info/namespace/model/TotalOnlineSizeDeserializer.java new file mode 100644 index 000000000..c389db372 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/TotalOnlineSizeDeserializer.java @@ -0,0 +1,46 @@ +package it.grid.storm.rest.info.namespace.model; + +import java.io.IOException; + +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.DeserializationContext; +import 
com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.deser.std.StdDeserializer; + +public class TotalOnlineSizeDeserializer extends StdDeserializer { + + /** + * + */ + private static final long serialVersionUID = 1L; + + protected TotalOnlineSizeDeserializer() { + this(null); + } + + protected TotalOnlineSizeDeserializer(Class vc) { + super(vc); + } + + @Override + public TotalOnlineSize deserialize(JsonParser jp, DeserializationContext ctxt) + throws IOException, JsonProcessingException { + + TotalOnlineSize result = new TotalOnlineSize(); + JsonNode node = jp.getCodec().readTree(jp); + // unit is optional + JsonNode unitNode = node.get("unit"); + if (unitNode != null) { + result.setUnit(UnitType.valueOf(unitNode.asText())); + } else { + result.setUnit(UnitType.TB); + } + // limited size is required + result.setLimitedSize(node.get("limited-size").asBoolean()); + // value is required + result.setValue(node.get("value").asLong()); + return result; + } + +} diff --git a/src/main/java/it/grid/storm/rest/info/namespace/model/UnitType.java b/src/main/java/it/grid/storm/rest/info/namespace/model/UnitType.java new file mode 100644 index 000000000..d392c1307 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/namespace/model/UnitType.java @@ -0,0 +1,6 @@ +package it.grid.storm.rest.info.namespace.model; + +public enum UnitType { + + Byte, KB, MB, GB, TB +} diff --git a/src/main/java/it/grid/storm/rest/info/storageareas/README.md b/src/main/java/it/grid/storm/rest/info/storageareas/README.md new file mode 100644 index 000000000..adcdd338e --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/storageareas/README.md @@ -0,0 +1,68 @@ +# Storage Areas Resource + +This interface allows to retrieve info about the defined storage areas.
+ +The endpoint path is `/info/storage-areas`: + + GET /info/storage-areas + + +## The Storage Area object + +The list of attributes returned for each storage area are: + +| Attribute | Value +|:---------------------|:--------------------- +| `name` | The name of the storage area. +| `token` | The space token value. +| `vos` | The list of VO names allowed to access this storage area. +| `rootPath` | The absolute path of storage area's root directory. +| `storageClass` | The Storage Area class type. One of the values: "T0D1", "T1D0", "T1D1". +| `accessPoints` | The list of access points. +| `retentionPolicy` | The retention policy. +| `accessLatency` | The access latency. +| `protocols` | The list of the supported transfer protocols. +| `anonymous` | Permissions for anonymous users. +| `availableNearlineSpace` | Total nearline space size. +| `approachableRules` | List of approachable rules. + + +## Examples + +CURL command: + + curl http://storm.test.infn.it:9998/info/storage-areas + +Output: + +```JSON + { + "TESTVO-FS": { + "name": "TESTVO-FS", + "token": "TESTVO_TOKEN", + "vos": [ + "test.vo" + ], + "rootPath": "/storage/test.vo", + "storageClass": "T0D1", + "accessPoints": [ + "/test.vo" + ], + "retentionPolicy": "replica", + "accessLatency": "online", + "protocols": [ + "xroot", + "https", + "http", + "root", + "gsiftp", + "file" + ], + "anonymous": "NOREAD", + "availableNearlineSpace": 0, + "approachableRules": [ + "vo:test.vo" + ] + } + } +``` diff --git a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResource.java b/src/main/java/it/grid/storm/rest/info/storageareas/StorageAreasResource.java similarity index 52% rename from src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResource.java rename to src/main/java/it/grid/storm/rest/info/storageareas/StorageAreasResource.java index 4643456b8..ccc616172 100644 --- a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResource.java +++ 
b/src/main/java/it/grid/storm/rest/info/storageareas/StorageAreasResource.java @@ -1,4 +1,4 @@ -package it.grid.storm.namespace.remote.resource; +package it.grid.storm.rest.info.storageareas; import java.util.List; import java.util.Map; @@ -13,33 +13,28 @@ import com.google.common.collect.Maps; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.VirtualFSInterface; -import it.grid.storm.namespace.model.SAInfo; -import it.grid.storm.namespace.remote.Constants; +import it.grid.storm.namespace.model.VirtualFS; +import it.grid.storm.rest.info.storageareas.model.SAInfo; -/** - * @author Michele Dibenedetto - */ -@Path("/" + Constants.RESOURCE + "/" + Constants.VERSION) -public class VirtualFSResource { +@Path("/info/storage-areas") +public class StorageAreasResource { - private static final Logger log = LoggerFactory.getLogger(VirtualFSResource.class); + private static final Logger log = LoggerFactory.getLogger(StorageAreasResource.class); /** * @return */ @GET - @Path("/" + Constants.LIST_ALL_VFS) @Produces(MediaType.APPLICATION_JSON) public Map listVFS() { log.debug("Serving VFS resource listing"); - List vfsCollection = NamespaceDirector.getNamespace().getAllDefinedVFS(); + List vfsCollection = Namespace.getInstance().getAllDefinedVFS(); Map output = Maps.newHashMap(); - for (VirtualFSInterface vfs : vfsCollection) { + for (VirtualFS vfs : vfsCollection) { try { output.put(vfs.getAliasName(), SAInfo.buildFromVFS(vfs)); } catch (NamespaceException e) { diff --git a/src/main/java/it/grid/storm/rest/info/storageareas/model/HttpPerms.java b/src/main/java/it/grid/storm/rest/info/storageareas/model/HttpPerms.java new file mode 100644 index 000000000..949b05dc8 --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/storageareas/model/HttpPerms.java @@ -0,0 +1,5 @@ +package it.grid.storm.rest.info.storageareas.model; + +public enum HttpPerms { + 
NOREAD, READ, READWRITE; +} diff --git a/src/main/java/it/grid/storm/rest/info/storageareas/model/SAInfo.java b/src/main/java/it/grid/storm/rest/info/storageareas/model/SAInfo.java new file mode 100644 index 000000000..15ed853fa --- /dev/null +++ b/src/main/java/it/grid/storm/rest/info/storageareas/model/SAInfo.java @@ -0,0 +1,211 @@ +package it.grid.storm.rest.info.storageareas.model; + +import java.util.Iterator; +import java.util.List; + +import com.google.common.collect.Lists; + +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.model.AccessLatency; +import it.grid.storm.namespace.model.ApproachableRule; +import it.grid.storm.namespace.model.Protocol; +import it.grid.storm.namespace.model.RetentionPolicy; +import it.grid.storm.namespace.model.StorageClassType; +import it.grid.storm.namespace.model.VirtualFS; + +public class SAInfo { + + private String name; + private String token; + private List vos; + private String rootPath; + private StorageClassType storageClass; + private List accessPoints; + private RetentionPolicy retentionPolicy; + private AccessLatency accessLatency; + private List protocols; + private HttpPerms anonymous; + private long availableNearlineSpace; + private List approachableRules; + + // Must have no-argument constructor + public SAInfo() { + + vos = Lists.newArrayList(); + accessPoints = Lists.newArrayList(); + protocols = Lists.newArrayList(); + approachableRules = Lists.newArrayList(); + } + + public String getName() { + + return name; + } + + public void setName(String name) { + + this.name = name; + } + + public String getToken() { + + return token; + } + + public void setToken(String token) { + + this.token = token; + } + + public List getVos() { + + return vos; + } + + public void addVo(String voName) { + + this.vos.add(voName); + } + + public String getRootPath() { + + return rootPath; + } + + public void setRoot(String rootPath) { + + this.rootPath = rootPath; + } + + public StorageClassType 
getStorageClass() { + + return storageClass; + } + + public void setStorageClass(StorageClassType storageClass) { + + this.storageClass = storageClass; + } + + public List getAccessPoints() { + + return accessPoints; + } + + public void addAccessPoint(String accessPoint) { + + this.accessPoints.add(accessPoint); + } + + public RetentionPolicy getRetentionPolicy() { + + return retentionPolicy; + } + + public void setRetentionPolicy(RetentionPolicy retentionPolicy) { + + this.retentionPolicy = retentionPolicy; + } + + public AccessLatency getAccessLatency() { + + return accessLatency; + } + + public void setAccessLatency(AccessLatency accessLatency) { + + this.accessLatency = accessLatency; + } + + public List getProtocols() { + + return protocols; + } + + public void addProtocol(String protocol) { + + this.protocols.add(protocol); + } + + public HttpPerms getAnonymous() { + + return anonymous; + } + + public void setAnonymous(HttpPerms anonymous) { + + this.anonymous = anonymous; + } + + public long getAvailableNearlineSpace() { + + return availableNearlineSpace; + } + + public void setAvailableNearlineSpace(long availableNearlineSpace) { + + this.availableNearlineSpace = availableNearlineSpace; + } + + public List getApproachableRules() { + + return approachableRules; + } + + public void addApproachableRule(String approachableRule) { + + this.approachableRules.add(approachableRule); + } + + public static SAInfo buildFromVFS(VirtualFS vfs) throws NamespaceException { + + SAInfo sa = new SAInfo(); + + sa.setName(vfs.getAliasName()); + sa.setToken(vfs.getSpaceTokenDescription()); + vfs.getApproachableRules().forEach(ar -> { + sa.addVo(ar.getSubjectRules().getVONameMatchingRule().getVOName()); + }); + sa.setRoot(vfs.getRootPath()); + vfs.getMappingRules().forEach(mr -> { + sa.addAccessPoint(mr.getStFNRoot()); + }); + Iterator protocolsIterator = + vfs.getCapabilities().getAllManagedProtocols().iterator(); + while (protocolsIterator.hasNext()) { + 
sa.addProtocol(protocolsIterator.next().getSchema()); + } + if (vfs.isHttpWorldReadable()) { + if (vfs.isApproachableByAnonymous()) { + sa.setAnonymous(HttpPerms.READWRITE); + } else { + sa.setAnonymous(HttpPerms.READ); + } + } else { + sa.setAnonymous(HttpPerms.NOREAD); + } + sa.setStorageClass(vfs.getStorageClassType()); + sa.setRetentionPolicy(vfs.getProperties().getRetentionPolicy()); + sa.setAccessLatency(vfs.getProperties().getAccessLatency()); + sa.setAvailableNearlineSpace(vfs.getAvailableNearlineSpace().value()); + + for (ApproachableRule rule : vfs.getApproachableRules()) { + if (rule.getSubjectRules().getDNMatchingRule().isMatchAll() + && rule.getSubjectRules().getVONameMatchingRule().isMatchAll()) { + continue; + } + if (!rule.getSubjectRules().getDNMatchingRule().isMatchAll()) { + sa.addApproachableRule( + rule.getSubjectRules().getDNMatchingRule().toShortSlashSeparatedString()); + } + if (!rule.getSubjectRules().getVONameMatchingRule().isMatchAll()) { + sa.addApproachableRule("vo:" + rule.getSubjectRules().getVONameMatchingRule().getVOName()); + } + } + if (sa.getApproachableRules().size() == 0) { + sa.getApproachableRules().add("'ALL'"); + } + + return sa; + } +} diff --git a/src/main/java/it/grid/storm/rest/metadata/Metadata.java b/src/main/java/it/grid/storm/rest/metadata/Metadata.java index 4e1905454..57cb203f9 100644 --- a/src/main/java/it/grid/storm/rest/metadata/Metadata.java +++ b/src/main/java/it/grid/storm/rest/metadata/Metadata.java @@ -5,17 +5,6 @@ import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; import static javax.ws.rs.core.Response.Status.NOT_FOUND; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; -import it.grid.storm.rest.metadata.model.StoriMetadata; -import it.grid.storm.rest.metadata.service.ResourceNotFoundException; -import it.grid.storm.rest.metadata.service.ResourceService; -import 
it.grid.storm.rest.metadata.service.StoriMetadataService; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.File; import javax.ws.rs.GET; @@ -24,6 +13,16 @@ import javax.ws.rs.Produces; import javax.ws.rs.WebApplicationException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.rest.metadata.model.StoriMetadata; +import it.grid.storm.rest.metadata.service.ResourceNotFoundException; +import it.grid.storm.rest.metadata.service.ResourceService; +import it.grid.storm.rest.metadata.service.StoriMetadataService; + @Path("/metadata") public class Metadata { @@ -33,7 +32,7 @@ public class Metadata { public Metadata() throws NamespaceException { - NamespaceInterface namespace = NamespaceDirector.getNamespace(); + Namespace namespace = Namespace.getInstance(); metadataService = new StoriMetadataService( new ResourceService(namespace.getAllDefinedVFS(), namespace.getAllDefinedMappingRules())); } diff --git a/src/main/java/it/grid/storm/rest/metadata/service/ResourceService.java b/src/main/java/it/grid/storm/rest/metadata/service/ResourceService.java index f847ebd99..44a0e0380 100644 --- a/src/main/java/it/grid/storm/rest/metadata/service/ResourceService.java +++ b/src/main/java/it/grid/storm/rest/metadata/service/ResourceService.java @@ -13,18 +13,18 @@ import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.model.MappingRule; import it.grid.storm.namespace.model.StoRIType; +import it.grid.storm.namespace.model.VirtualFS; public class ResourceService { private static final Logger log = LoggerFactory.getLogger(ResourceService.class); - protected Collection vfsList; + protected Collection vfsList; protected Collection rulesList; - public ResourceService(Collection vfsList, + public ResourceService(Collection 
vfsList, Collection rulesList) { checkNotNull(vfsList, "Invalid null list of Virtual FS"); diff --git a/src/main/java/it/grid/storm/rest/metadata/service/StoriMetadataService.java b/src/main/java/it/grid/storm/rest/metadata/service/StoriMetadataService.java index 4ad2dff03..9d5d8f15c 100644 --- a/src/main/java/it/grid/storm/rest/metadata/service/StoriMetadataService.java +++ b/src/main/java/it/grid/storm/rest/metadata/service/StoriMetadataService.java @@ -22,7 +22,7 @@ import it.grid.storm.namespace.InvalidDescendantsEmptyRequestException; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.rest.metadata.model.FileAttributes; import it.grid.storm.rest.metadata.model.StoriMetadata; import it.grid.storm.rest.metadata.model.VirtualFsMetadata; @@ -54,7 +54,7 @@ public StoriMetadata getMetadata(String stfnPath) private StoriMetadata buildFileMetadata(StoRI stori) throws IOException, FSException { - VirtualFSInterface vfs = stori.getVirtualFileSystem(); + VirtualFS vfs = stori.getVirtualFileSystem(); String canonicalPath = stori.getLocalFile().getCanonicalPath(); log.debug("VirtualFS is {}", vfs.getAliasName()); VirtualFsMetadata vfsMeta = diff --git a/src/main/java/it/grid/storm/scheduler/ChunkTask.java b/src/main/java/it/grid/storm/scheduler/ChunkTask.java index ed506ef24..1fccca308 100644 --- a/src/main/java/it/grid/storm/scheduler/ChunkTask.java +++ b/src/main/java/it/grid/storm/scheduler/ChunkTask.java @@ -114,7 +114,7 @@ public void setResult(boolean result) { * */ @Override - public int compareTo(Object o) { + public int compareTo(Task o) { return 0; } diff --git a/src/main/java/it/grid/storm/scheduler/CruncherTask.java b/src/main/java/it/grid/storm/scheduler/CruncherTask.java index 89c892f85..6faa4ea3c 100644 --- a/src/main/java/it/grid/storm/scheduler/CruncherTask.java +++ 
b/src/main/java/it/grid/storm/scheduler/CruncherTask.java @@ -41,81 +41,75 @@ public class CruncherTask extends Task { - private Delegable todo = null; + private Delegable todo = null; - public CruncherTask(Delegable todo) { + public CruncherTask(Delegable todo) { - super(); - this.todo = todo; - this.taskName = todo.getName(); - } + super(); + this.todo = todo; + this.taskName = todo.getName(); + } - /** - * Compares this object with the specified object for order. - * - * @param o - * the Object to be compared. - * @return a negative integer, zero, or a positive integer as this object is - * less than, equal to, or greater than the specified object. - * @todo Implement this java.lang.Comparable method - */ - public int compareTo(Object o) { + /** + * Compares this object with the specified object for order. + * + * @param o the Object to be compared. + * @return a negative integer, zero, or a positive integer as this object is less than, equal to, + * or greater than the specified object. + * @todo Implement this java.lang.Comparable method + */ + public int compareTo(Task o) { - /** - * @todo : make the implementation! - */ - return 0; - } + return 0; + } - /** - * When an object implementing interface Runnable is used to - * create a thread, starting the thread causes the object's run - * method to be called in that separately executing thread. - * - * @todo Implement this java.lang.Runnable method - */ - public void run() { + /** + * When an object implementing interface Runnable is used to create a thread, + * starting the thread causes the object's run method to be called in that separately + * executing thread. 
+ * + * @todo Implement this java.lang.Runnable method + */ + public void run() { - this.runEvent(); - todo.doIt(); - this.endEvent(); - } + this.runEvent(); + todo.doIt(); + this.endEvent(); + } - /** - * Two CruncherTask are equals if and only - * - * if the inner Delegable object are equals AND if the name of the Task are - * equals - * - * @param o - * Object - * @return boolean - */ - public boolean equals(Object obj) { + /** + * Two CruncherTask are equals if and only + * + * if the inner Delegable object are equals AND if the name of the Task are equals + * + * @param o Object + * @return boolean + */ + public boolean equals(Object obj) { - if (obj == this) - return true; - if (!(obj instanceof CruncherTask)) - return false; - CruncherTask other = (CruncherTask) obj; - if (!(other.getName().equals(this.getName()))) - return false; - if (!(other.todo.equals(this.todo))) - return false; - else - return true; - } + if (obj == this) + return true; + if (!(obj instanceof CruncherTask)) + return false; + CruncherTask other = (CruncherTask) obj; + if (!(other.getName().equals(this.getName()))) + return false; + if (!(other.todo.equals(this.todo))) + return false; + else + return true; + } - /** - * - * @return int - */ - public int hashCode() { + /** + * + * @return int + */ + public int hashCode() { - int hash = 17; - if (this.taskName.length() != 0) - hash = 37 * hash + taskName.hashCode(); - hash = 37 * hash + this.todo.hashCode(); - return hash; - } + int hash = 17; + if (this.taskName.length() != 0) + hash = 37 * hash + taskName.hashCode(); + hash = 37 * hash + this.todo.hashCode(); + return hash; + } } diff --git a/src/main/java/it/grid/storm/scheduler/SchedulerException.java b/src/main/java/it/grid/storm/scheduler/SchedulerException.java index c924ede6a..b6983070e 100644 --- a/src/main/java/it/grid/storm/scheduler/SchedulerException.java +++ b/src/main/java/it/grid/storm/scheduler/SchedulerException.java @@ -41,32 +41,37 @@ */ public class SchedulerException 
extends Exception { - private String whichScheduler; + /** + * + */ + private static final long serialVersionUID = 1L; - public SchedulerException(String whichSched) { + private String whichScheduler; - super(); - whichScheduler = whichSched; - } + public SchedulerException(String whichSched) { - public SchedulerException(String whichSched, String message) { + super(); + whichScheduler = whichSched; + } - super(message); - } + public SchedulerException(String whichSched, String message) { - public SchedulerException(Throwable cause) { + super(message); + } - super(cause); - } + public SchedulerException(Throwable cause) { - public SchedulerException(String message, Throwable cause) { + super(cause); + } - super(message, cause); - } + public SchedulerException(String message, Throwable cause) { - public String toString() { + super(message, cause); + } - return "Exception occurred within scheduler type = " + whichScheduler; - } + public String toString() { + + return "Exception occurred within scheduler type = " + whichScheduler; + } } diff --git a/src/main/java/it/grid/storm/scheduler/Task.java b/src/main/java/it/grid/storm/scheduler/Task.java index 69903f032..d498a6b91 100644 --- a/src/main/java/it/grid/storm/scheduler/Task.java +++ b/src/main/java/it/grid/storm/scheduler/Task.java @@ -40,84 +40,95 @@ * */ -public abstract class Task implements Runnable, Comparable { +public abstract class Task implements Runnable, Comparable { - private static String UNDEF_TASKNAME = "undefined"; - private long creationTime = System.currentTimeMillis(); - private long enqueueTime = 0L; - private long startExecutionTime = 0L; - private long endExecutionTime = 0L; - private long abortingEventTime = 0L; - private long suspendingEventTime = 0L; - protected String taskName = null; + private static String UNDEF_TASKNAME = "undefined"; + private long creationTime = System.currentTimeMillis(); + private long enqueueTime = 0L; + private long startExecutionTime = 0L; + private long 
endExecutionTime = 0L; + private long abortingEventTime = 0L; + private long suspendingEventTime = 0L; + protected String taskName = null; - protected Task() { + protected Task() { - this(UNDEF_TASKNAME); - } + this(UNDEF_TASKNAME); + } - protected Task(String name) { + protected Task(String name) { - taskName = name; - if (taskName == null) { - taskName = UNDEF_TASKNAME; - } - creationTime = System.currentTimeMillis(); - } + taskName = name; + if (taskName == null) { + taskName = UNDEF_TASKNAME; + } + creationTime = System.currentTimeMillis(); + } - public long getStartExecutionTime() { + public long getStartExecutionTime() { - return this.startExecutionTime; - } + return startExecutionTime; + } - public long howlongBeforeUnqueue() { + public long getAbortingEventTime() { - return enqueueTime - creationTime; - } + return abortingEventTime; + } - public long howlongInQueue() { + public long getSuspendingEventTime() { - return startExecutionTime - enqueueTime; - } + return suspendingEventTime; + } - public long howlongInExecution() { + public long howlongBeforeUnqueue() { - return endExecutionTime - startExecutionTime; - } + return enqueueTime - creationTime; + } - protected void enqueueEvent() { + public long howlongInQueue() { - this.enqueueTime = System.currentTimeMillis(); - } + return startExecutionTime - enqueueTime; + } - protected void abortEvent() { + public long howlongInExecution() { - this.abortingEventTime = System.currentTimeMillis(); - } + return endExecutionTime - startExecutionTime; + } - protected void suspendEvent() { + protected void enqueueEvent() { - this.suspendingEventTime = System.currentTimeMillis(); - } + enqueueTime = System.currentTimeMillis(); + } - protected void runEvent() { + protected void abortEvent() { - this.startExecutionTime = System.currentTimeMillis(); - } + abortingEventTime = System.currentTimeMillis(); + } - protected void endEvent() { + protected void suspendEvent() { - this.endExecutionTime = System.currentTimeMillis(); 
- } + suspendingEventTime = System.currentTimeMillis(); + } - protected String getName() { + protected void runEvent() { - return taskName; - } + startExecutionTime = System.currentTimeMillis(); + } - public abstract void run(); + protected void endEvent() { - public abstract int compareTo(Object o); + endExecutionTime = System.currentTimeMillis(); + } + + protected String getName() { + + return taskName; + } + + public abstract void run(); + + public abstract int compareTo(Task o); + + public abstract boolean equals(Object o); - public abstract boolean equals(Object o); } diff --git a/src/main/java/it/grid/storm/space/IllegalSRMSpaceParameter.java b/src/main/java/it/grid/storm/space/IllegalSRMSpaceParameter.java index b4771574b..d47679d7e 100644 --- a/src/main/java/it/grid/storm/space/IllegalSRMSpaceParameter.java +++ b/src/main/java/it/grid/storm/space/IllegalSRMSpaceParameter.java @@ -22,8 +22,7 @@ import it.grid.storm.srm.types.TSizeInBytes; /** - * This class represents an Exception throws if SpaceResData is not well formed. - * * + * This class represents an Exception throws if SpaceResData is not well formed. 
* * * @author Magnoni Luca * @author Cnaf - INFN Bologna @@ -33,21 +32,26 @@ public class IllegalSRMSpaceParameter extends Exception { - private boolean nullAuth = true; - private boolean nullSpaceDes = true; - private boolean nullRetentionPolicyInfo = true; + /** + * + */ + private static final long serialVersionUID = 1L; - public IllegalSRMSpaceParameter(GridUserInterface guser, - TSizeInBytes spaceDes, TRetentionPolicyInfo retentionPolicyInfo) { + private boolean nullAuth = true; + private boolean nullSpaceDes = true; + private boolean nullRetentionPolicyInfo = true; - nullAuth = (guser == null); - nullSpaceDes = (spaceDes == null); - nullRetentionPolicyInfo = (retentionPolicyInfo == null); - } + public IllegalSRMSpaceParameter(GridUserInterface guser, TSizeInBytes spaceDes, + TRetentionPolicyInfo retentionPolicyInfo) { - public String toString() { + nullAuth = (guser == null); + nullSpaceDes = (spaceDes == null); + nullRetentionPolicyInfo = (retentionPolicyInfo == null); + } - return "The Problem is: null-Auth= " + nullAuth + ", nullSpaceDesired= " - + nullSpaceDes + ", nullRetentionPolicyInfo= " + nullRetentionPolicyInfo; - } + public String toString() { + + return "The Problem is: null-Auth= " + nullAuth + ", nullSpaceDesired= " + nullSpaceDes + + ", nullRetentionPolicyInfo= " + nullRetentionPolicyInfo; + } } diff --git a/src/main/java/it/grid/storm/space/NullSpaceUpdaterHelper.java b/src/main/java/it/grid/storm/space/NullSpaceUpdaterHelper.java index c73131033..679114009 100644 --- a/src/main/java/it/grid/storm/space/NullSpaceUpdaterHelper.java +++ b/src/main/java/it/grid/storm/space/NullSpaceUpdaterHelper.java @@ -3,26 +3,23 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import it.grid.storm.namespace.VirtualFSInterface; - public class NullSpaceUpdaterHelper implements SpaceUpdaterHelperInterface { - private static final Logger log = LoggerFactory - .getLogger(NullSpaceUpdaterHelper.class); + private static final Logger log = 
LoggerFactory.getLogger(NullSpaceUpdaterHelper.class); - @Override - public boolean increaseUsedSpace(VirtualFSInterface vfs, long size) { + @Override + public boolean increaseUsedSpace(long size) { - log.debug("NullSpaceUpdaterHelper doesn't increase used size!"); - return true; - } + log.debug("NullSpaceUpdaterHelper doesn't increase used size!"); + return true; + } - @Override - public boolean decreaseUsedSpace(VirtualFSInterface vfs, long size) { + @Override + public boolean decreaseUsedSpace(long size) { - log.debug("NullSpaceUpdaterHelper doesn't decrease used size!"); - return true; - } + log.debug("NullSpaceUpdaterHelper doesn't decrease used size!"); + return true; + } } diff --git a/src/main/java/it/grid/storm/space/SimpleSpaceUpdaterHelper.java b/src/main/java/it/grid/storm/space/SimpleSpaceUpdaterHelper.java index 30606ca65..efb918927 100644 --- a/src/main/java/it/grid/storm/space/SimpleSpaceUpdaterHelper.java +++ b/src/main/java/it/grid/storm/space/SimpleSpaceUpdaterHelper.java @@ -4,76 +4,70 @@ import org.slf4j.LoggerFactory; import it.grid.storm.catalogs.ReservedSpaceCatalog; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.model.VirtualFS; public class SimpleSpaceUpdaterHelper implements SpaceUpdaterHelperInterface { - private static final Logger log = LoggerFactory - .getLogger(SimpleSpaceUpdaterHelper.class); - - private ReservedSpaceCatalog rsc; - - public SimpleSpaceUpdaterHelper() { - rsc = new ReservedSpaceCatalog(); - } - - private StorageSpaceData getStorageSpaceDataForVFS(VirtualFSInterface vfs) { - - return rsc.getStorageSpaceByAlias(vfs.getSpaceTokenDescription()); - } - - @Override - public boolean increaseUsedSpace(VirtualFSInterface vfs, long size) { - - log.debug("Increase {} used space: {} bytes ", vfs.getAliasName(), size); - - if (size < 0) { - log.error("Size to add is a negative value: {}", size); - return false; - } - if (size == 0) { - log.debug("Size is zero, vfs {} used space won't be 
increased!", - vfs.getAliasName()); - return true; - } - - log.debug("Get StorageSpaceData from vfs ..."); - StorageSpaceData ssd = getStorageSpaceDataForVFS(vfs); - - if (ssd == null) { - log.error("Unable to get StorageSpaceData from alias name {}", - vfs.getAliasName()); - return false; - } - - return rsc.increaseUsedSpace(ssd.getSpaceToken().getValue(), size); - } - - @Override - public boolean decreaseUsedSpace(VirtualFSInterface vfs, long size) { - - log.debug("Decrease {} used space: {} bytes ", vfs.getAliasName(), size); - - if (size < 0) { - log.error("Size to remove is a negative value: {}", size); - return false; - } - if (size == 0) { - log.debug("Size is zero, vfs {} used space won't be decreased!", - vfs.getAliasName()); - return true; - } - - log.debug("Get StorageSpaceData from vfs ..."); - StorageSpaceData ssd = getStorageSpaceDataForVFS(vfs); - - if (ssd == null) { - log.error("Unable to get StorageSpaceData from alias name {}", - vfs.getAliasName()); - return false; - } - - return rsc.decreaseUsedSpace(ssd.getSpaceToken().getValue(), size); - } + private static final Logger log = LoggerFactory.getLogger(SimpleSpaceUpdaterHelper.class); + + private VirtualFS vfs; + + public SimpleSpaceUpdaterHelper(VirtualFS vfs) { + this.vfs = vfs; + } + + @Override + public boolean increaseUsedSpace(long size) { + + log.debug("Increase {} used space: {} bytes ", vfs.getAliasName(), size); + + if (size < 0) { + log.error("Size to add is a negative value: {}", size); + return false; + } + if (size == 0) { + log.debug("Size is zero, vfs {} used space won't be increased!", vfs.getAliasName()); + return true; + } + + ReservedSpaceCatalog rsc = ReservedSpaceCatalog.getInstance(); + + log.debug("Get StorageSpaceData from vfs ..."); + StorageSpaceData ssd = rsc.getStorageSpaceByAlias(vfs.getSpaceTokenDescription()); + + if (ssd == null) { + log.error("Unable to get StorageSpaceData from alias name {}", vfs.getAliasName()); + return false; + } + + return 
rsc.increaseUsedSpace(ssd.getSpaceToken().getValue(), size); + } + + @Override + public boolean decreaseUsedSpace(long size) { + + log.debug("Decrease {} used space: {} bytes ", vfs.getAliasName(), size); + + if (size < 0) { + log.error("Size to remove is a negative value: {}", size); + return false; + } + if (size == 0) { + log.debug("Size is zero, vfs {} used space won't be decreased!", vfs.getAliasName()); + return true; + } + + ReservedSpaceCatalog rsc = ReservedSpaceCatalog.getInstance(); + + log.debug("Get StorageSpaceData from vfs ..."); + StorageSpaceData ssd = rsc.getStorageSpaceByAlias(vfs.getSpaceTokenDescription()); + + if (ssd == null) { + log.error("Unable to get StorageSpaceData from alias name {}", vfs.getAliasName()); + return false; + } + + return rsc.decreaseUsedSpace(ssd.getSpaceToken().getValue(), size); + } } diff --git a/src/main/java/it/grid/storm/space/SpaceHelper.java b/src/main/java/it/grid/storm/space/SpaceHelper.java index 0f85454a7..90f42ff20 100644 --- a/src/main/java/it/grid/storm/space/SpaceHelper.java +++ b/src/main/java/it/grid/storm/space/SpaceHelper.java @@ -17,40 +17,29 @@ package it.grid.storm.space; -import it.grid.storm.catalogs.InvalidRetrievedDataException; -import it.grid.storm.catalogs.InvalidSpaceDataAttributesException; -import it.grid.storm.catalogs.MultipleDataEntriesException; -import it.grid.storm.catalogs.NoDataFoundException; -import it.grid.storm.catalogs.ReducedPtPChunkData; +import java.util.Iterator; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.catalogs.ReservedSpaceCatalog; import it.grid.storm.common.types.InvalidPFNAttributeException; import it.grid.storm.common.types.PFN; import it.grid.storm.common.types.SizeUnit; -import it.grid.storm.config.Configuration; -import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; -import it.grid.storm.namespace.NamespaceDirector; -import 
it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.exceptions.InvalidSpaceDataAttributesException; import it.grid.storm.persistence.model.TransferObjectDecodingException; import it.grid.storm.srm.types.ArrayOfTSpaceToken; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TSizeInBytes; import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TSpaceType; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * This class is part of the StoRM project. Copyright (c) 2008 INFN-CNAF. *

@@ -65,365 +54,303 @@ public class SpaceHelper { - private static final int ADD_FREE_SPACE = 0; - private static final int REMOVE_FREE_SPACE = 1; - private Configuration config; - private static final Logger log = LoggerFactory.getLogger(SpaceHelper.class); - public static GridUserInterface storageAreaOwner = GridUserManager - .makeSAGridUser(); - - public SpaceHelper() { - - config = Configuration.getInstance(); - } - - public boolean isSAFull(Logger log, StoRI stori) { - - log.debug("Checking if the Storage Area is full"); - - VirtualFSInterface fs = stori.getVirtualFileSystem(); - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); - - // Get StorageSpaceData from the database - String ssDesc = fs.getSpaceTokenDescription(); - StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); - - if ((spaceData != null) && (spaceData.getAvailableSpaceSize().value() == 0)) { - log.debug("AvailableSize={}" , spaceData.getAvailableSpaceSize().value()); - return true; - } else { - return false; - } - - } - - public long getSAFreeSpace(Logger log, StoRI stori) { - - log.debug("Checking if the Storage Area is full"); - - VirtualFSInterface fs = stori.getVirtualFileSystem(); - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); - - // Get StorageSpaceData from the database - String ssDesc = fs.getSpaceTokenDescription(); - StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); - - if (spaceData != null) { - return spaceData.getAvailableSpaceSize().value(); - } else { - return -1; - } - - } - - /** - * Verifies if the storage area to which the provided stori belongs has been - * initialized The verification is made on used space field - * - * @param log - * @param stori - * @return - */ - public boolean isSAInitialized(Logger log, StoRI stori) { - - log.debug("Checking if the Storage Area is initialized"); - if (stori == null) { - throw new IllegalArgumentException( - "Unable to perform the SA initialization check, provided null parameters: 
log : " - + log + " , stori : " + stori); - } - boolean response = false; - VirtualFSInterface fs = stori.getVirtualFileSystem(); - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); - // Get StorageSpaceData from the database - String ssDesc = fs.getSpaceTokenDescription(); - - StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); - - if (spaceData != null && spaceData.getUsedSpaceSize() != null - && !spaceData.getUsedSpaceSize().isEmpty() - && spaceData.getUsedSpaceSize().value() >= 0) { - - response = true; - } - log.debug("The storage area is initialized with token alias {} is {} initialized" - , spaceData.getSpaceTokenAlias() , (response ? "" : "not")); - return response; - } - - /** - * - * @param log - * @param stori - * @return - */ - public TSpaceToken getTokenFromStoRI(Logger log, StoRI stori) { - - log.debug("SpaceHelper: getting space token from StoRI"); - VirtualFSInterface fs = stori.getVirtualFileSystem(); - return fs.getSpaceToken(); - - } - - /** - * Returns the spaceTokens associated to the 'user' AND 'spaceAlias'. If - * 'spaceAlias' is NULL or an empty string then this method returns all the - * space tokens this 'user' owns. - * - * @param user - * VomsGridUser user. - * @param spaceAlias - * User space token description. - */ - private Boolean isDefaultSpaceToken(TSpaceToken token) { - - Boolean found = false; - - config = Configuration.getInstance(); - List tokens = config.getListOfDefaultSpaceToken(); - for (int i = 0; i < tokens.size(); i++) { - if ((tokens.get(i)).toLowerCase().equals(token.getValue().toLowerCase())) { - found = true; - } - } - - return found; - } - - /** - * This method is used by the namespace parser component to insert a new Space - * Token Description data into the space catalog. In this way a standard Space - * Token is created, making it work for the GetSpaceMetaData request an - * SrmPreparateToPut with SpaceToken. 
- * - * The following code check if a SA_token with the same space description is - * already present into the catalog, if no data are found the new data are - * inserted, if yes the new data and the data already present are compared, - * and if needed an update operation is performed. - * - * The mandatory parameters are: - * - * @param spaceTokenAlias - * the space token description the user have to specify into the - * namespace.xml file - * @param totalOnLineSize - * the size the user have to specify into the namespace.xml file - * @param date - * @param spaceFileName - * the space file name will be used to get the free size. It is the - * StFNRoot. - */ - - public TSpaceToken createVOSA_Token(String spaceTokenAlias, - TSizeInBytes totalOnLineSize, String spaceFileName) { - - // TODO errors are not managed in this function - TSpaceToken spaceToken = null; - ArrayOfTSpaceToken tokenArray; - ReservedSpaceCatalog spaceCatalog = new ReservedSpaceCatalog(); - - // Try with fake user, if it does not work remove it and use different - // method - - // First, check if the same VOSpaceArea already exists - tokenArray = spaceCatalog.getSpaceTokensByAlias(spaceTokenAlias); - - if (tokenArray == null || tokenArray.size() == 0) { - // the VOSpaceArea does not exist yet - SpaceHelper.log.debug("VoSpaceArea {} still does not exists. Start creation process." 
, spaceTokenAlias); - - PFN sfname = null; - try { - sfname = PFN.make(spaceFileName); - } catch (InvalidPFNAttributeException e1) { - log.error("Error building PFN with {} : " , spaceFileName , e1); - } - - StorageSpaceData ssd = null; - - try { - ssd = new StorageSpaceData(storageAreaOwner, TSpaceType.VOSPACE, - spaceTokenAlias, totalOnLineSize, totalOnLineSize, - TLifeTimeInSeconds.makeInfinite(), null, null, sfname); - // ssd.setReservedSpaceSize(totalOnLineSize); - try { - ssd.setUnavailableSpaceSize(TSizeInBytes.make(0, SizeUnit.BYTES)); - ssd.setReservedSpaceSize(TSizeInBytes.make(0, SizeUnit.BYTES)); - - } catch (InvalidTSizeAttributesException e) { - // never thrown - log.error("Unexpected InvalidTSizeAttributesException: {}" - , e.getMessage(),e); - } - spaceToken = ssd.getSpaceToken(); - } catch (InvalidSpaceDataAttributesException e) { - log.error("Error building StorageSpaceData: " , e); - } - - try { - spaceCatalog.addStorageSpace(ssd); - } catch (DataAccessException e) { - log.error("Error storing StorageSpaceData on the DB: " , e); - } - // Track into global set to remove obsolete SA_token - ReservedSpaceCatalog.addSpaceToken(spaceToken); - - } else { - /* - * the VOspaceArea already exists. Compare new data and data already - * present to check if the parameter has changed or not, and then perform - * update operation into catalog if it is needed. Only static information - * changes determine an update of the exeisting row - */ - SpaceHelper.log.debug("VOSpaceArea for space token description " - + spaceTokenAlias + " already present into DB."); - - boolean equal = false; - spaceToken = tokenArray.getTSpaceToken(0); - StorageSpaceData catalog_ssd = null; - try { - catalog_ssd = spaceCatalog.getStorageSpace(spaceToken); - } catch (TransferObjectDecodingException e) { - log - .error("Unable to build StorageSpaceData from StorageSpaceTO. 
TransferObjectDecodingException: {}" - , e.getMessage(),e); - } catch (DataAccessException e) { - log.error("Unable to build get StorageSpaceTO. DataAccessException: {}" - , e.getMessage(),e); - } - - if (catalog_ssd != null) { - - if (catalog_ssd.getOwner().getDn().equals(storageAreaOwner.getDn()) - && (catalog_ssd.getSpaceTokenAlias().equals(spaceTokenAlias)) - && (catalog_ssd.getTotalSpaceSize().value() == totalOnLineSize - .value()) - && (catalog_ssd.getSpaceFileName().toString().equals(spaceFileName))) { - equal = true; - } - - } - - // false otherwise - if (equal) { - // Do nothing if equals, everything are already present into - // the DB - SpaceHelper.log.debug("VOSpaceArea for space token description {} is already up to date." - , spaceTokenAlias); - ReservedSpaceCatalog.addSpaceToken(spaceToken); - - } else { - // If the new data has been modified, update the data into the - // catalog - SpaceHelper.log.debug("VOSpaceArea for space token description {} is different in some parameters. Updating the catalog." - , spaceTokenAlias); - try { - catalog_ssd.setOwner(storageAreaOwner); - catalog_ssd.setTotalSpaceSize(totalOnLineSize); - catalog_ssd.setTotalGuaranteedSize(totalOnLineSize); - - PFN sfn = null; - try { - sfn = PFN.make(spaceFileName); - } catch (InvalidPFNAttributeException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - catalog_ssd.setSpaceFileName(sfn); - - spaceCatalog.updateAllStorageSpace(catalog_ssd); - ReservedSpaceCatalog.addSpaceToken(spaceToken); - - } catch (NoDataFoundException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } catch (InvalidRetrievedDataException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } catch (MultipleDataEntriesException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - - } - - // Warning. 
CHeck if there are multiple token with same alisa, this - // is not allowed - if (tokenArray.size() > 1) { - SpaceHelper.log - .error("Error: multiple Space Token found for the same space Alias: {}. Only one has been evaluated!" - , spaceTokenAlias); - } - - } - return spaceToken; - - } - - /** - * This method should be use at the end of the namespace insert process - * (through the createVO_SA_token(...)) to remmove from the database the old - * VO_SA_token inserted from the previous namsespace.xml configuration - * - */ - public void purgeOldVOSA_token() { - - purgeOldVOSA_token(SpaceHelper.log); - } - - public void purgeOldVOSA_token(Logger log) { - - ReservedSpaceCatalog spacec = new ReservedSpaceCatalog(); - log.debug("VO SA: garbage collecting obsolete VOSA_token"); - - Iterator iter = ReservedSpaceCatalog.getTokenSet().iterator(); - while (iter.hasNext()) { - log.debug("VO SA token REGISTRED: {}" , iter.next().getValue()); - } - - GridUserInterface stormServiceUser = GridUserManager.makeSAGridUser(); - - // Remove obsolete space - ArrayOfTSpaceToken token_a = spacec.getSpaceTokens(stormServiceUser, null); - for (int i = 0; i < token_a.size(); i++) { - log.debug("VO SA token IN CATALOG: {}" , token_a.getTSpaceToken(i).getValue()); - } - - if ((token_a != null) && (token_a.size() > 0)) { - for (int i = 0; i < token_a.size(); i++) { - - if (!ReservedSpaceCatalog.getTokenSet().contains( - token_a.getTSpaceToken(i))) { - // This VOSA_token is no more used, removing it from persistence - TSpaceToken tokenToRemove = token_a.getTSpaceToken(i); - log.debug("VO SA token {} is no more used, removing it from persistence." , tokenToRemove); - spacec.release(stormServiceUser, tokenToRemove); - } - } - } else { - log - .warn("Space Catalog garbage SA_Token: no SA TOKENs specified. 
Please check your namespace.xml file."); - } - - ReservedSpaceCatalog.clearTokenSet(); - - } - - /** - * @param spaceData - * @return - */ - public static boolean isStorageArea(StorageSpaceData spaceData) - throws IllegalArgumentException { - - if (spaceData == null) { - log.error("Received null spaceData parameter"); - throw new IllegalArgumentException("Received null spaceData parameter"); - } - boolean result = false; - if (spaceData.getOwner() != null) { - result = spaceData.getOwner().equals(SpaceHelper.storageAreaOwner); - } - return result; - } + private static final Logger log = LoggerFactory.getLogger(SpaceHelper.class); + public static GridUserInterface storageAreaOwner = GridUserManager.makeSAGridUser(); + +// private ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); + + public boolean isSAFull(Logger log, StoRI stori) { + + log.debug("Checking if the Storage Area is full"); + + VirtualFS fs = stori.getVirtualFileSystem(); + + // Get StorageSpaceData from the database + String ssDesc = fs.getSpaceTokenDescription(); + StorageSpaceData spaceData = ReservedSpaceCatalog.getInstance().getStorageSpaceByAlias(ssDesc); + + if ((spaceData != null) && (spaceData.getAvailableSpaceSize().value() == 0)) { + log.debug("AvailableSize={}", spaceData.getAvailableSpaceSize().value()); + return true; + } else { + return false; + } + + } + + public long getSAFreeSpace(Logger log, StoRI stori) { + + log.debug("Checking if the Storage Area is full"); + + VirtualFS fs = stori.getVirtualFileSystem(); + + // Get StorageSpaceData from the database + String ssDesc = fs.getSpaceTokenDescription(); + StorageSpaceData spaceData = ReservedSpaceCatalog.getInstance().getStorageSpaceByAlias(ssDesc); + + if (spaceData != null) { + return spaceData.getAvailableSpaceSize().value(); + } else { + return -1; + } + + } + + /** + * Verifies if the storage area to which the provided stori belongs has been initialized The + * verification is made on used space field + * + * 
@param log + * @param stori + * @return + */ + public boolean isSAInitialized(Logger log, StoRI stori) { + + log.debug("Checking if the Storage Area is initialized"); + if (stori == null) { + throw new IllegalArgumentException( + "Unable to perform the SA initialization check, provided null parameters: log : " + log + + " , stori : " + stori); + } + boolean response = false; + VirtualFS fs = stori.getVirtualFileSystem(); + // Get StorageSpaceData from the database + String ssDesc = fs.getSpaceTokenDescription(); + + StorageSpaceData spaceData = ReservedSpaceCatalog.getInstance().getStorageSpaceByAlias(ssDesc); + + if (spaceData != null && spaceData.getUsedSpaceSize() != null + && !spaceData.getUsedSpaceSize().isEmpty() && spaceData.getUsedSpaceSize().value() >= 0) { + + response = true; + } + log.debug("The storage area is initialized with token alias {} is {} initialized", + spaceData.getSpaceTokenAlias(), (response ? "" : "not")); + return response; + } + + /** + * + * @param log + * @param stori + * @return + */ + public TSpaceToken getTokenFromStoRI(Logger log, StoRI stori) { + + log.debug("SpaceHelper: getting space token from StoRI"); + VirtualFS fs = stori.getVirtualFileSystem(); + return fs.getSpaceToken(); + + } + + /** + * This method is used by the namespace parser component to insert a new Space Token Description + * data into the space catalog. In this way a standard Space Token is created, making it work for + * the GetSpaceMetaData request an SrmPreparateToPut with SpaceToken. + * + * The following code check if a SA_token with the same space description is already present into + * the catalog, if no data are found the new data are inserted, if yes the new data and the data + * already present are compared, and if needed an update operation is performed. 
+ * + * The mandatory parameters are: + * + * @param spaceTokenAlias the space token description the user have to specify into the + * namespace.xml file + * @param totalOnLineSize the size the user have to specify into the namespace.xml file + * @param date + * @param spaceFileName the space file name will be used to get the free size. It is the StFNRoot. + */ + + public TSpaceToken createVOSA_Token(String spaceTokenAlias, TSizeInBytes totalOnLineSize, + String spaceFileName) { + + TSpaceToken spaceToken = null; + ArrayOfTSpaceToken tokenArray; + + // Try with fake user, if it does not work remove it and use different + // method + + // First, check if the same VOSpaceArea already exists + tokenArray = ReservedSpaceCatalog.getInstance().getSpaceTokensByAlias(spaceTokenAlias); + + if (tokenArray == null || tokenArray.size() == 0) { + // the VOSpaceArea does not exist yet + log.debug("VoSpaceArea {} still does not exists. Start creation process.", spaceTokenAlias); + + PFN sfname = null; + try { + sfname = PFN.make(spaceFileName); + } catch (InvalidPFNAttributeException e1) { + log.error("Error building PFN with {} : ", spaceFileName, e1); + } + + StorageSpaceData ssd = null; + + try { + ssd = new StorageSpaceData(storageAreaOwner, TSpaceType.VOSPACE, spaceTokenAlias, + totalOnLineSize, totalOnLineSize, TLifeTimeInSeconds.makeInfinite(), null, null, + sfname); + // ssd.setReservedSpaceSize(totalOnLineSize); + try { + ssd.setUnavailableSpaceSize(TSizeInBytes.make(0, SizeUnit.BYTES)); + ssd.setReservedSpaceSize(TSizeInBytes.make(0, SizeUnit.BYTES)); + + } catch (InvalidTSizeAttributesException e) { + // never thrown + log.error("Unexpected InvalidTSizeAttributesException: {}", e.getMessage(), e); + } + spaceToken = ssd.getSpaceToken(); + } catch (InvalidSpaceDataAttributesException e) { + log.error("Error building StorageSpaceData: ", e); + } + + try { + ReservedSpaceCatalog.getInstance().addStorageSpace(ssd); + } catch (DataAccessException e) { + log.error("Error 
storing StorageSpaceData on the DB: ", e); + } + // Track into global set to remove obsolete SA_token + ReservedSpaceCatalog.addSpaceToken(spaceToken); + + } else { + /* + * the VOspaceArea already exists. Compare new data and data already present to check if the + * parameter has changed or not, and then perform update operation into catalog if it is + * needed. Only static information changes determine an update of the exeisting row + */ + SpaceHelper.log.debug("VOSpaceArea for space token description " + spaceTokenAlias + + " already present into DB."); + + boolean equal = false; + spaceToken = tokenArray.getTSpaceToken(0); + StorageSpaceData catalog_ssd = null; + try { + catalog_ssd = ReservedSpaceCatalog.getInstance().getStorageSpace(spaceToken); + } catch (TransferObjectDecodingException e) { + log.error( + "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: {}", + e.getMessage(), e); + } catch (DataAccessException e) { + log.error("Unable to build get StorageSpaceTO. DataAccessException: {}", e.getMessage(), e); + } + + if (catalog_ssd != null) { + + if (catalog_ssd.getOwner().getDn().equals(storageAreaOwner.getDn()) + && (catalog_ssd.getSpaceTokenAlias().equals(spaceTokenAlias)) + && (catalog_ssd.getTotalSpaceSize().value() == totalOnLineSize.value()) + && (catalog_ssd.getSpaceFileName().toString().equals(spaceFileName))) { + equal = true; + } + + } + + // false otherwise + if (equal) { + // Do nothing if equals, everything are already present into + // the DB + SpaceHelper.log.debug("VOSpaceArea for space token description {} is already up to date.", + spaceTokenAlias); + ReservedSpaceCatalog.addSpaceToken(spaceToken); + + } else { + // If the new data has been modified, update the data into the + // catalog + SpaceHelper.log.debug( + "VOSpaceArea for space token description {} is different in some parameters. 
Updating the catalog.", + spaceTokenAlias); + catalog_ssd.setOwner(storageAreaOwner); + catalog_ssd.setTotalSpaceSize(totalOnLineSize); + catalog_ssd.setTotalGuaranteedSize(totalOnLineSize); + + PFN sfn = null; + try { + sfn = PFN.make(spaceFileName); + } catch (InvalidPFNAttributeException e) { + e.printStackTrace(); + } + catalog_ssd.setSpaceFileName(sfn); + + ReservedSpaceCatalog.getInstance().updateAllStorageSpace(catalog_ssd); + ReservedSpaceCatalog.addSpaceToken(spaceToken); + + } + + // Warning. CHeck if there are multiple token with same alisa, this + // is not allowed + if (tokenArray.size() > 1) { + SpaceHelper.log.error( + "Error: multiple Space Token found for the same space Alias: {}. Only one has been evaluated!", + spaceTokenAlias); + } + + } + return spaceToken; + + } + + /** + * This method should be use at the end of the namespace insert process (through the + * createVO_SA_token(...)) to remmove from the database the old VO_SA_token inserted from the + * previous namsespace.xml configuration + * + */ + public void purgeOldVOSA_token() { + + purgeOldVOSA_token(SpaceHelper.log); + } + + public void purgeOldVOSA_token(Logger log) { + + log.debug("VO SA: garbage collecting obsolete VOSA_token"); + + Iterator iter = ReservedSpaceCatalog.getTokenSet().iterator(); + while (iter.hasNext()) { + log.debug("VO SA token REGISTRED: {}", iter.next().getValue()); + } + + GridUserInterface stormServiceUser = GridUserManager.makeSAGridUser(); + + // Remove obsolete space + ArrayOfTSpaceToken token_a = ReservedSpaceCatalog.getInstance().getSpaceTokens(stormServiceUser, null); + for (int i = 0; i < token_a.size(); i++) { + log.debug("VO SA token IN CATALOG: {}", token_a.getTSpaceToken(i).getValue()); + } + + if ((token_a != null) && (token_a.size() > 0)) { + for (int i = 0; i < token_a.size(); i++) { + + if (!ReservedSpaceCatalog.getTokenSet().contains(token_a.getTSpaceToken(i))) { + // This VOSA_token is no more used, removing it from persistence + TSpaceToken 
tokenToRemove = token_a.getTSpaceToken(i); + log.debug("VO SA token {} is no more used, removing it from persistence.", + tokenToRemove); + ReservedSpaceCatalog.getInstance().release(stormServiceUser, tokenToRemove); + } + } + } else { + log.warn( + "Space Catalog garbage SA_Token: no SA TOKENs specified. Please check your namespace.xml file."); + } + + ReservedSpaceCatalog.clearTokenSet(); + + } + + /** + * @param spaceData + * @return + */ + public static boolean isStorageArea(StorageSpaceData spaceData) throws IllegalArgumentException { + + if (spaceData == null) { + log.error("Received null spaceData parameter"); + throw new IllegalArgumentException("Received null spaceData parameter"); + } + boolean result = false; + if (spaceData.getOwner() != null) { + result = spaceData.getOwner().equals(SpaceHelper.storageAreaOwner); + } + return result; + } } diff --git a/src/main/java/it/grid/storm/space/SpaceUpdaterHelperFactory.java b/src/main/java/it/grid/storm/space/SpaceUpdaterHelperFactory.java index 33b0745f4..4d7e02056 100644 --- a/src/main/java/it/grid/storm/space/SpaceUpdaterHelperFactory.java +++ b/src/main/java/it/grid/storm/space/SpaceUpdaterHelperFactory.java @@ -1,13 +1,12 @@ package it.grid.storm.space; import it.grid.storm.namespace.CapabilityInterface; -import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.model.Quota; - +import it.grid.storm.namespace.model.VirtualFS; public class SpaceUpdaterHelperFactory { - public static SpaceUpdaterHelperInterface getSpaceUpdaterHelper(VirtualFSInterface vfs) { + public static SpaceUpdaterHelperInterface getSpaceUpdaterHelper(VirtualFS vfs) { if (vfs == null) { throw new IllegalArgumentException("VirtualFSInterface null!"); @@ -31,7 +30,7 @@ public static SpaceUpdaterHelperInterface getSpaceUpdaterHelper(VirtualFSInterfa } } - return new SimpleSpaceUpdaterHelper(); + return new SimpleSpaceUpdaterHelper(vfs); } } diff --git 
a/src/main/java/it/grid/storm/space/SpaceUpdaterHelperInterface.java b/src/main/java/it/grid/storm/space/SpaceUpdaterHelperInterface.java index ce69f90db..fdc5fe9a6 100644 --- a/src/main/java/it/grid/storm/space/SpaceUpdaterHelperInterface.java +++ b/src/main/java/it/grid/storm/space/SpaceUpdaterHelperInterface.java @@ -1,11 +1,9 @@ package it.grid.storm.space; -import it.grid.storm.namespace.VirtualFSInterface; - public interface SpaceUpdaterHelperInterface { - public boolean increaseUsedSpace(VirtualFSInterface vfs, long size); + public boolean increaseUsedSpace(long size); - public boolean decreaseUsedSpace(VirtualFSInterface vfs, long size); + public boolean decreaseUsedSpace(long size); } diff --git a/src/main/java/it/grid/storm/space/StorageSpaceData.java b/src/main/java/it/grid/storm/space/StorageSpaceData.java index 2aaa7d184..c2f8b349d 100644 --- a/src/main/java/it/grid/storm/space/StorageSpaceData.java +++ b/src/main/java/it/grid/storm/space/StorageSpaceData.java @@ -27,7 +27,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import it.grid.storm.catalogs.InvalidSpaceDataAttributesException; import it.grid.storm.common.types.InvalidPFNAttributeException; import it.grid.storm.common.types.PFN; import it.grid.storm.common.types.SizeUnit; @@ -35,6 +34,7 @@ import it.grid.storm.common.types.VO; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; +import it.grid.storm.persistence.exceptions.InvalidSpaceDataAttributesException; import it.grid.storm.persistence.model.StorageSpaceTO; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException; diff --git a/src/main/java/it/grid/storm/space/gpfsquota/GPFSFilesetQuotaInfo.java b/src/main/java/it/grid/storm/space/gpfsquota/GPFSFilesetQuotaInfo.java index 027fcffbb..d27a40301 100644 --- a/src/main/java/it/grid/storm/space/gpfsquota/GPFSFilesetQuotaInfo.java +++ 
b/src/main/java/it/grid/storm/space/gpfsquota/GPFSFilesetQuotaInfo.java @@ -1,7 +1,7 @@ package it.grid.storm.space.gpfsquota; import it.grid.storm.common.types.SizeUnit; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.srm.types.TSizeInBytes; public interface GPFSFilesetQuotaInfo { @@ -22,7 +22,7 @@ public interface GPFSFilesetQuotaInfo { public boolean isQuotaEnabled(); - public VirtualFSInterface getVFS(); + public VirtualFS getVFS(); public SizeUnit getSizeUnit(); } diff --git a/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaInfo.java b/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaInfo.java index f920bd6cd..865c50a39 100644 --- a/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaInfo.java +++ b/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaInfo.java @@ -2,18 +2,16 @@ import it.grid.storm.common.types.SizeUnit; import it.grid.storm.filesystem.swig.quota_info; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.srm.types.TSizeInBytes; import it.grid.storm.util.GPFSSizeHelper; /** * Describes information about quota block limits on a GPFS fileset. 
- * - * */ public class GPFSQuotaInfo implements GPFSFilesetQuotaInfo { - public static GPFSQuotaInfo fromNativeQuotaInfo(VirtualFSInterface fs, + public static GPFSQuotaInfo fromNativeQuotaInfo(VirtualFS fs, quota_info qi) { return new GPFSQuotaInfo(fs, qi); @@ -25,9 +23,9 @@ public static GPFSQuotaInfo fromNativeQuotaInfo(VirtualFSInterface fs, private String filesetName; private boolean quotaEnabled = false; - private VirtualFSInterface VFS; + private VirtualFS VFS; - private GPFSQuotaInfo(VirtualFSInterface fs, quota_info qi) { + private GPFSQuotaInfo(VirtualFS fs, quota_info qi) { this.VFS = fs; this.filesetName = qi.getFileset_name(); @@ -63,7 +61,7 @@ public SizeUnit getSizeUnit() { return SizeUnit.BYTES; } - public VirtualFSInterface getVFS() { + public VirtualFS getVFS() { return VFS; } @@ -94,7 +92,7 @@ public void setFilesetName(String filesetName) { this.filesetName = filesetName; } - public void setVFS(VirtualFSInterface vFS) { + public void setVFS(VirtualFS vFS) { VFS = vFS; } diff --git a/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaManager.java b/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaManager.java index 0f1a7fa07..fea4cbcbd 100644 --- a/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaManager.java +++ b/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaManager.java @@ -1,17 +1,5 @@ package it.grid.storm.space.gpfsquota; -import it.grid.storm.catalogs.ReservedSpaceCatalog; -import it.grid.storm.common.types.SizeUnit; -import it.grid.storm.concurrency.NamedThreadFactory; -import it.grid.storm.config.Configuration; -import it.grid.storm.filesystem.FilesystemError; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.VirtualFSInterface; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.space.StorageSpaceData; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.util.VirtualFSHelper; - import java.util.List; import 
java.util.concurrent.CompletionService; import java.util.concurrent.ExecutionException; @@ -24,6 +12,18 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import it.grid.storm.catalogs.ReservedSpaceCatalog; +import it.grid.storm.common.types.SizeUnit; +import it.grid.storm.concurrency.NamedThreadFactory; +import it.grid.storm.config.Configuration; +import it.grid.storm.filesystem.FilesystemError; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.model.VirtualFS; +import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.space.StorageSpaceData; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.util.VirtualFSHelper; + /** * GPFSQuotaManager. Currently supports only GPFS fileset quotas. This manager starts periodic tasks * that fetch quota information from gpfs fs and update the space area data on the Storm database. @@ -72,7 +72,7 @@ public enum GPFSQuotaManager { /** * The list of GPFS filesystems which have quota enabled. */ - private List quotaEnabledFilesystems; + private List quotaEnabledFilesystems; /** * The last exception thrown by a GPFS quota calculation job. 
@@ -131,7 +131,7 @@ public void run() { int completedTasks = 0; - for (VirtualFSInterface vfs : quotaEnabledFilesystems) { + for (VirtualFS vfs : quotaEnabledFilesystems) { log.info("Submitting GPFS quota info computation for vfs rooted at {}", vfs.getRootPath()); quotaService.submit(new GetGPFSFilesetQuotaInfoCommand(vfs)); @@ -215,16 +215,16 @@ private void handleNoLimitsQuota(GPFSFilesetQuotaInfo info, StorageSpaceData ssd } } - private StorageSpaceData getStorageSpaceDataForVFS(VirtualFSInterface vfs) { + private StorageSpaceData getStorageSpaceDataForVFS(VirtualFS vfs) { - ReservedSpaceCatalog rsc = new ReservedSpaceCatalog(); + ReservedSpaceCatalog rsc = ReservedSpaceCatalog.getInstance(); String spaceToken = vfs.getSpaceTokenDescription(); return rsc.getStorageSpaceByAlias(spaceToken); } private void persistStorageSpaceData(StorageSpaceData ssd) throws DataAccessException { - ReservedSpaceCatalog rsc = new ReservedSpaceCatalog(); + ReservedSpaceCatalog rsc = ReservedSpaceCatalog.getInstance(); rsc.updateStorageSpace(ssd); } diff --git a/src/main/java/it/grid/storm/space/gpfsquota/GetGPFSFilesetQuotaInfoCommand.java b/src/main/java/it/grid/storm/space/gpfsquota/GetGPFSFilesetQuotaInfoCommand.java index 302d601c3..09986040f 100644 --- a/src/main/java/it/grid/storm/space/gpfsquota/GetGPFSFilesetQuotaInfoCommand.java +++ b/src/main/java/it/grid/storm/space/gpfsquota/GetGPFSFilesetQuotaInfoCommand.java @@ -1,16 +1,15 @@ package it.grid.storm.space.gpfsquota; -import it.grid.storm.filesystem.swig.gpfs; -import it.grid.storm.namespace.VirtualFSInterface; - import java.util.concurrent.Callable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import it.grid.storm.filesystem.swig.gpfs; +import it.grid.storm.namespace.model.VirtualFS; + /** * Computes GPFS fileset quota by leveraging {@link gpfs#get_fileset_quota_info(String)}. 
- * - * */ public class GetGPFSFilesetQuotaInfoCommand implements Callable { @@ -18,9 +17,9 @@ public class GetGPFSFilesetQuotaInfoCommand implements private static final Logger log = LoggerFactory .getLogger(GetGPFSFilesetQuotaInfoCommand.class); - private VirtualFSInterface vfs; + private VirtualFS vfs; - public GetGPFSFilesetQuotaInfoCommand(VirtualFSInterface vfs) { + public GetGPFSFilesetQuotaInfoCommand(VirtualFS vfs) { this.vfs = vfs; } diff --git a/src/main/java/it/grid/storm/srm/types/ArrayOfSURLs.java b/src/main/java/it/grid/storm/srm/types/ArrayOfSURLs.java index 878a5d861..1e64022cb 100644 --- a/src/main/java/it/grid/storm/srm/types/ArrayOfSURLs.java +++ b/src/main/java/it/grid/storm/srm/types/ArrayOfSURLs.java @@ -113,7 +113,7 @@ public List asStringList() { return stringList; } - public static ArrayOfSURLs decode(Map inputParam, String name) + public static ArrayOfSURLs decode(Map inputParam, String name) throws InvalidArrayOfSURLsAttributeException { List list = null; diff --git a/src/main/java/it/grid/storm/srm/types/ArrayOfTExtraInfo.java b/src/main/java/it/grid/storm/srm/types/ArrayOfTExtraInfo.java index b290c3f11..b1cb7f795 100644 --- a/src/main/java/it/grid/storm/srm/types/ArrayOfTExtraInfo.java +++ b/src/main/java/it/grid/storm/srm/types/ArrayOfTExtraInfo.java @@ -28,126 +28,127 @@ import java.io.*; import java.util.*; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + public class ArrayOfTExtraInfo implements Serializable { - /** - * - */ + /** + * + */ private static final long serialVersionUID = 1L; - + public static String PNAME_STORAGESYSTEMINFO = "storageSystemInfo"; - private ArrayList extraInfoList; + private ArrayList extraInfoList; + + /** + * Constructor that requires a String. If it is null, then an + * InvalidArrayOfTExtraInfoAttributeException is thrown. 
+ */ + public ArrayOfTExtraInfo(TExtraInfo[] infoArray) + throws InvalidArrayOfTExtraInfoAttributeException { + + if (infoArray == null) { + throw new InvalidArrayOfTExtraInfoAttributeException(infoArray); + } + } + + public ArrayOfTExtraInfo() { + + extraInfoList = new ArrayList(); + } + + public Object[] getArray() { + + return extraInfoList.toArray(); + } - /** - * Constructor that requires a String. If it is null, then an - * InvalidArrayOfTExtraInfoAttributeException is thrown. - */ - public ArrayOfTExtraInfo(TExtraInfo[] infoArray) - throws InvalidArrayOfTExtraInfoAttributeException { - - if (infoArray == null) { - throw new InvalidArrayOfTExtraInfoAttributeException(infoArray); - } - } - - public ArrayOfTExtraInfo() { - - extraInfoList = new ArrayList(); - } - - public Object[] getArray() { - - return extraInfoList.toArray(); - } - - public TExtraInfo getTSpaceToken(int i) { - - return extraInfoList.get(i); - } - - public void setTExtraInfo(int index, TExtraInfo info) { - - extraInfoList.set(index, info); - } - - public void addTExtraInfo(TExtraInfo info) { - - extraInfoList.add(info); - } - - public int size() { - - return extraInfoList.size(); - } - - /** - * Fills this class using the values found in a structure inside a Hashtable. - * The Hashtable may contain different structures inside, all are identifiend - * by a name. Used for communication with the FE. - * - * @param inputParam - * Hashtable to read. - * @param fieldName - * Name that identifies the ArrayOfTExtraInfo structure in the - * Hashtable. - * @return A new ArrayOfTExtraInfo instance. 
- */ - public static ArrayOfTExtraInfo decode(Map inputParam, String fieldName) - throws InvalidArrayOfTExtraInfoAttributeException { - - List list = null; - try { - list = Arrays.asList((Object[]) inputParam.get(fieldName)); - } catch (NullPointerException e) { - // log.warn("Empty SURL array found!"); - } - - if (list == null) { - throw new InvalidArrayOfTExtraInfoAttributeException(null); - } - - ArrayOfTExtraInfo extraInfoArray = new ArrayOfTExtraInfo(); - - for (int i = 0; i < list.size(); i++) { - Hashtable extraInfo; - - extraInfo = (Hashtable) list.get(i); - try { - extraInfoArray.addTExtraInfo(TExtraInfo.decode(extraInfo)); - } catch (InvalidTExtraInfoAttributeException e) { - throw new InvalidArrayOfTExtraInfoAttributeException(null); - } - } - return extraInfoArray; - } - - public void encode(Map outputParam, String name) { - - Vector> vector = new Vector>(); - - for (TExtraInfo extraInfo : extraInfoList) { - Hashtable extraInfoMap = new Hashtable(); - extraInfo.encode(extraInfoMap); - vector.add(extraInfoMap); - } - outputParam.put(name, vector); - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - if (extraInfoList != null) { - sb.append("["); - for (Iterator it = extraInfoList.iterator(); it.hasNext();) { - TExtraInfo element = (TExtraInfo) it.next(); - sb.append(element.toString()); - } - sb.append("]"); - } else { - sb.append("EMPTY LIST"); - } - return sb.toString(); - } + public TExtraInfo getTSpaceToken(int i) { + + return extraInfoList.get(i); + } + + public void setTExtraInfo(int index, TExtraInfo info) { + + extraInfoList.set(index, info); + } + + public void addTExtraInfo(TExtraInfo info) { + + extraInfoList.add(info); + } + + public int size() { + + return extraInfoList.size(); + } + + /** + * Fills this class using the values found in a structure inside a HashMap. The HashMap may + * contain different structures inside, all are identified by a name. Used for communication with + * the FE. 
+ * + * @param inputParam HashMap to read. + * @param fieldName Name that identifies the ArrayOfTExtraInfo structure in the HashMap. + * @return A new ArrayOfTExtraInfo instance. + */ + @SuppressWarnings("unchecked") + public static ArrayOfTExtraInfo decode(Map inputParam, String fieldName) + throws InvalidArrayOfTExtraInfoAttributeException { + + List list = null; + try { + list = Arrays.asList((Object[]) inputParam.get(fieldName)); + } catch (NullPointerException e) { + // log.warn("Empty SURL array found!"); + } + + if (list == null) { + throw new InvalidArrayOfTExtraInfoAttributeException(null); + } + + ArrayOfTExtraInfo extraInfoArray = new ArrayOfTExtraInfo(); + + for (int i = 0; i < list.size(); i++) { + Map extraInfo; + + extraInfo = (Map) list.get(i); + try { + extraInfoArray.addTExtraInfo(TExtraInfo.decode(extraInfo)); + } catch (InvalidTExtraInfoAttributeException e) { + throw new InvalidArrayOfTExtraInfoAttributeException(null); + } + } + return extraInfoArray; + } + + public void encode(Map outputParam, String name) { + + List> vector = Lists.newArrayList(); + + for (TExtraInfo extraInfo : extraInfoList) { + Map extraInfoMap = Maps.newHashMap(); + extraInfo.encode(extraInfoMap); + vector.add(extraInfoMap); + } + outputParam.put(name, vector); + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + if (extraInfoList != null) { + sb.append("["); + for (Iterator it = extraInfoList.iterator(); it.hasNext();) { + TExtraInfo element = (TExtraInfo) it.next(); + sb.append(element.toString()); + } + sb.append("]"); + } else { + sb.append("EMPTY LIST"); + } + return sb.toString(); + } } diff --git a/src/main/java/it/grid/storm/srm/types/ArrayOfTMetaDataPathDetail.java b/src/main/java/it/grid/storm/srm/types/ArrayOfTMetaDataPathDetail.java index 4695e777d..318d3d554 100644 --- a/src/main/java/it/grid/storm/srm/types/ArrayOfTMetaDataPathDetail.java +++ b/src/main/java/it/grid/storm/srm/types/ArrayOfTMetaDataPathDetail.java @@ -25,78 +25,78 
@@ package it.grid.storm.srm.types; -import java.util.ArrayList; -import java.util.Hashtable; +import java.io.Serializable; import java.util.List; import java.util.Map; -import java.util.Vector; -import it.grid.storm.srm.types.TSpaceToken; -import java.io.Serializable; +import com.google.common.collect.Lists; public class ArrayOfTMetaDataPathDetail implements Serializable { - public static String PNAME_DETAILS = "details"; - public static String PNAME_ARRAYOFSUBPATHS = "arrayOfSubPaths"; - ArrayList metaDataList; + /** + * + */ + private static final long serialVersionUID = 1L; + + public static String PNAME_DETAILS = "details"; + public static String PNAME_ARRAYOFSUBPATHS = "arrayOfSubPaths"; + List metaDataList; - public ArrayOfTMetaDataPathDetail() { + public ArrayOfTMetaDataPathDetail() { - metaDataList = new ArrayList(); - } + metaDataList = Lists.newArrayList(); + } - public Object[] getArray() { + public Object[] getArray() { - return metaDataList.toArray(); - } + return metaDataList.toArray(); + } - public TMetaDataPathDetail getTMetaDataPathDetail(int i) { + public TMetaDataPathDetail getTMetaDataPathDetail(int i) { - return (TMetaDataPathDetail) metaDataList.get(i); - } + return metaDataList.get(i); + } - public void setTMetaDataPathDetail(int index, TMetaDataPathDetail elem) { + public void setTMetaDataPathDetail(int index, TMetaDataPathDetail elem) { - metaDataList.set(index, elem); - } + metaDataList.set(index, elem); + } - public void addTMetaDataPathDetail(TMetaDataPathDetail elem) { + public void addTMetaDataPathDetail(TMetaDataPathDetail elem) { - metaDataList.add(elem); - } + metaDataList.add(elem); + } - public int size() { + public int size() { - return metaDataList.size(); - } + return metaDataList.size(); + } - /** - * Encode method, used to create a structured paramter representing this - * object, for FE communication. - * - * @param outputParam - * structured Parameter that must be filled whit ArrayOfTMetaDataPath - * information. 
- * @param name - * name of the paramter - */ - public void encode(Map outputParam, String name) { + /** + * Encode method, used to create a structured parameter representing this object, for FE + * communication. + * + * @param outputParam structured Parameter that must be filled whit ArrayOfTMetaDataPath + * information. + * @param name name of the parameter + */ + public void encode(Map outputParam, String name) { - List list = new ArrayList(); - for (int i = 0; i < metaDataList.size(); i++) { - ((TMetaDataPathDetail) metaDataList.get(i)).encode(list); - } - outputParam.put(name, list); - } + List list = Lists.newArrayList(); + for (int i = 0; i < metaDataList.size(); i++) { + ((TMetaDataPathDetail) metaDataList.get(i)).encode(list); + } + outputParam.put(name, list); + } - public String toString() { + public String toString() { - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < metaDataList.size(); i++) { - sb.append("MetaData[" + i + "]:\n"); - sb.append(((TMetaDataPathDetail) metaDataList.get(i)).toString()); - } - return sb.toString(); - } + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < metaDataList.size(); i++) { + sb.append("MetaData[" + i + "]:\n"); + sb.append(((TMetaDataPathDetail) metaDataList.get(i)).toString()); + } + return sb.toString(); + } } diff --git a/src/main/java/it/grid/storm/srm/types/ArrayOfTMetaDataSpace.java b/src/main/java/it/grid/storm/srm/types/ArrayOfTMetaDataSpace.java index c3cb09d4c..f732e38f7 100644 --- a/src/main/java/it/grid/storm/srm/types/ArrayOfTMetaDataSpace.java +++ b/src/main/java/it/grid/storm/srm/types/ArrayOfTMetaDataSpace.java @@ -25,62 +25,68 @@ package it.grid.storm.srm.types; +import java.io.Serializable; import java.util.ArrayList; -import java.util.HashMap; +import java.util.List; import java.util.Map; -import it.grid.storm.srm.types.TMetaDataSpace; -import java.io.Serializable; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; public class 
ArrayOfTMetaDataSpace implements Serializable { - public static String PNAME_ARRAYOFSPACEDETAILS = "arrayOfSpaceDetails"; + /** + * + */ + private static final long serialVersionUID = 1L; + + public static String PNAME_ARRAYOFSPACEDETAILS = "arrayOfSpaceDetails"; - ArrayList metaDataList; + ArrayList metaDataList; - public ArrayOfTMetaDataSpace() { + public ArrayOfTMetaDataSpace() { - metaDataList = new ArrayList(); - } + metaDataList = new ArrayList(); + } - public TMetaDataSpace[] getArray() { + public TMetaDataSpace[] getArray() { - return (TMetaDataSpace[]) metaDataList - .toArray(new TMetaDataSpace[metaDataList.size()]); - } + return (TMetaDataSpace[]) metaDataList.toArray(new TMetaDataSpace[metaDataList.size()]); + } - public TMetaDataSpace getTMetaDataSpace(int i) { + public TMetaDataSpace getTMetaDataSpace(int i) { - return (TMetaDataSpace) metaDataList.get(i); - } + return (TMetaDataSpace) metaDataList.get(i); + } - public void setTMetaDataSpace(int index, TMetaDataSpace data) { + public void setTMetaDataSpace(int index, TMetaDataSpace data) { - metaDataList.set(index, data); - } + metaDataList.set(index, data); + } - public void addTMetaDataSpace(TMetaDataSpace data) { + public void addTMetaDataSpace(TMetaDataSpace data) { - metaDataList.add(data); - } + metaDataList.add(data); + } - public int size() { + public int size() { - return metaDataList.size(); - } + return metaDataList.size(); + } - public void encode(Map outputParam, String fieldName) { + public void encode(Map outputParam, String fieldName) { - ArrayList metaDataSpaceList = new ArrayList(); - int arraySize = this.size(); + List> metaDataSpaceList = Lists.newArrayList(); + int arraySize = this.size(); - for (int i = 0; i < arraySize; i++) { - Map metaDataSpace = new HashMap(); - TMetaDataSpace metaDataElement = this.getTMetaDataSpace(i); - metaDataElement.encode(metaDataSpace); + for (int i = 0; i < arraySize; i++) { + Map metaDataSpace = Maps.newHashMap(); + TMetaDataSpace metaDataElement = 
this.getTMetaDataSpace(i); + metaDataElement.encode(metaDataSpace); - metaDataSpaceList.add(metaDataSpace); - } + metaDataSpaceList.add(metaDataSpace); + } - outputParam.put(fieldName, metaDataSpaceList); - } + outputParam.put(fieldName, metaDataSpaceList); + } } diff --git a/src/main/java/it/grid/storm/srm/types/ArrayOfTSURLLifetimeReturnStatus.java b/src/main/java/it/grid/storm/srm/types/ArrayOfTSURLLifetimeReturnStatus.java index a7dd7532f..2f8ce05e6 100644 --- a/src/main/java/it/grid/storm/srm/types/ArrayOfTSURLLifetimeReturnStatus.java +++ b/src/main/java/it/grid/storm/srm/types/ArrayOfTSURLLifetimeReturnStatus.java @@ -25,118 +25,67 @@ */ package it.grid.storm.srm.types; -import java.util.ArrayList; -import java.util.Hashtable; +import java.util.List; import java.util.Map; -import java.util.Vector; + +import com.google.common.collect.Lists; public class ArrayOfTSURLLifetimeReturnStatus { - public static String PNAME_ARRAYOFFILESTATUSES = "arrayOfFileStatuses"; - - ArrayList array; - - /** - * Constructs an ArrayOfTSURLLifetimeReturnStatus of 'numItems' empty - * elements. - * - * @param numItems - */ - public ArrayOfTSURLLifetimeReturnStatus(int numItems) { - - array = new ArrayList(numItems); - } - - // /** - // * Constructor that requires a String. If it is null, then an - // * InvalidArrayOfTExtraInfoAttributeException is thrown. - // */ - // public ArrayOfTSURLLifetimeReturnStatus(TSURLReturnStatus[] surlArray) - // throws InvalidArrayOfTSURLReturnStatusAttributeException { - // - // if (surlArray == null) throw new - // InvalidArrayOfTSURLReturnStatusAttributeException(surlArray); - // //FIXME this.tokenArray = tokenArray; - // } - - /** - * Constructs an empty ArrayOfTSURLLifetimeReturnStatus. - */ - public ArrayOfTSURLLifetimeReturnStatus() { - - array = new ArrayList(); - } - - /** - * Get the array list. - * - * @return ArrayList - */ - public ArrayList getArray() { - - return array; - } - - /** - * Get the i-th element of the array. 
- * - * @param i - * int - * @return TSURLLifetimeReturnStatus - */ - public TSURLLifetimeReturnStatus getTSURLLifetimeReturnStatus(int i) { - - return (TSURLLifetimeReturnStatus) array.get(i); - } - - /** - * Set the i-th element of the array. - * - * @param index - * int - * @param item - * TSURLLifetimeReturnStatus - */ - public void setTSURLReturnStatus(int index, TSURLLifetimeReturnStatus item) { - - array.set(index, item); - } - - /** - * Add an element to the array. - * - * @param item - * TSURLLifetimeReturnStatus - */ - public void addTSurlReturnStatus(TSURLLifetimeReturnStatus item) { - - array.add(item); - } - - /** - * Returns the size of the array. - * - * @return int - */ - public int size() { - - return array.size(); - } - - /** - * Encodes the array to a Hashtable structure. - * - * @param outputParam - * Hashtable - * @param name - * String - */ - public void encode(Map outputParam, String name) { - - ArrayList list = new ArrayList(); - for (int i = 0; i < array.size(); i++) { - ((TSURLLifetimeReturnStatus) array.get(i)).encode(list); - } - outputParam.put(name, list); - } + public static String PNAME_ARRAYOFFILESTATUSES = "arrayOfFileStatuses"; + + List array; + + /** + * Constructs an empty ArrayOfTSURLLifetimeReturnStatus. + */ + public ArrayOfTSURLLifetimeReturnStatus() { + + array = Lists.newArrayList(); + } + + /** + * Get the array list. + * + * @return ArrayList + */ + public List getArray() { + + return array; + } + + /** + * Add an element to the array. + * + * @param item TSURLLifetimeReturnStatus + */ + public void addTSurlReturnStatus(TSURLLifetimeReturnStatus item) { + + array.add(item); + } + + /** + * Returns the size of the array. + * + * @return int + */ + public int size() { + + return array.size(); + } + + /** + * Encodes the array to a HashMap structure. 
+ * + * @param outputParam HashMap + * @param name String + */ + public void encode(Map outputParam, String name) { + + List> list = Lists.newArrayList(); + for (int i = 0; i < array.size(); i++) { + ((TSURLLifetimeReturnStatus) array.get(i)).encode(list); + } + outputParam.put(name, list); + } } diff --git a/src/main/java/it/grid/storm/srm/types/ArrayOfTSURLReturnStatus.java b/src/main/java/it/grid/storm/srm/types/ArrayOfTSURLReturnStatus.java index 44f6b7a18..7692574be 100644 --- a/src/main/java/it/grid/storm/srm/types/ArrayOfTSURLReturnStatus.java +++ b/src/main/java/it/grid/storm/srm/types/ArrayOfTSURLReturnStatus.java @@ -30,151 +30,149 @@ import java.util.List; import java.util.Map; +import com.google.common.collect.Lists; + public class ArrayOfTSURLReturnStatus { - public static String PNAME_ARRAYOFFILESTATUSES = "arrayOfFileStatuses"; + public static String PNAME_ARRAYOFFILESTATUSES = "arrayOfFileStatuses"; + + ArrayList surlRetList; - ArrayList surlRetList; + /** + * Construct an ArrayOfTSURLReturnStatus of numItems empty elements. + */ + public ArrayOfTSURLReturnStatus(int numItems) { - /** - * Construct an ArrayOfTSURLReturnStatus of numItems empty elements. - */ - public ArrayOfTSURLReturnStatus(int numItems) { - - surlRetList = new ArrayList(numItems); - } - - /** - * Constructor that requires a String. If it is null, then an - * InvalidArrayOfTExtraInfoAttributeException is thrown. 
- */ - public ArrayOfTSURLReturnStatus(TSURLReturnStatus[] surlArray) - throws InvalidArrayOfTSURLReturnStatusAttributeException { - - if (surlArray == null) { - throw new InvalidArrayOfTSURLReturnStatusAttributeException(surlArray); - } - this.surlRetList = new ArrayList( - Arrays.asList(surlArray)); - } - - public ArrayOfTSURLReturnStatus() { - - surlRetList = new ArrayList(); - } - - public ArrayList getArray() { - - return surlRetList; - } - - public TSURLReturnStatus getTSURLReturnStatus(int i) { - - return (TSURLReturnStatus) surlRetList.get(i); - } - - public void setTSURLReturnStatus(int index, TSURLReturnStatus surl) { - - surlRetList.set(index, surl); - } - - public void addTSurlReturnStatus(TSURLReturnStatus surl) { - - surlRetList.add(surl); - } - - public int size() { - - return surlRetList.size(); - } - - /** - * @param surl - * @throws IllegalArgumentException - * if null argument or not contained surl - */ - public void updateStatus(TSURLReturnStatus surlStatus, TReturnStatus newStatus) - throws IllegalArgumentException { - - if (surlStatus == null || newStatus == null) { - throw new IllegalArgumentException( - "Unable to update the status,null arguments: surlStatus=" + surlStatus - + " newStatus=" + newStatus); - } - int index = surlRetList.indexOf(surlStatus); - if (index < 0) { - throw new IllegalArgumentException( - "Unable to update the status,unknown TSURLReturnStatus" + surlStatus); - } - surlRetList.get(index).setStatus(newStatus); - } - - public void encode(Map outputParam, String name) { - - List list = new ArrayList(); - for (int i = 0; i < surlRetList.size(); i++) { - ((TSURLReturnStatus) surlRetList.get(i)).encode(list); - } - - outputParam.put(name, list); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("ArrayOfTSURLReturnStatus [surlRetList="); - builder.append(surlRetList); - builder.append("]"); - 
return builder.toString(); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = 1; - result = prime * result - + ((surlRetList == null) ? 0 : surlRetList.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - ArrayOfTSURLReturnStatus other = (ArrayOfTSURLReturnStatus) obj; - if (surlRetList == null) { - if (other.surlRetList != null) { - return false; - } - } else if (!surlRetList.equals(other.surlRetList)) { - return false; - } - return true; - } + surlRetList = new ArrayList(numItems); + } + + /** + * Constructor that requires a String. If it is null, then an + * InvalidArrayOfTExtraInfoAttributeException is thrown. + */ + public ArrayOfTSURLReturnStatus(TSURLReturnStatus[] surlArray) + throws InvalidArrayOfTSURLReturnStatusAttributeException { + + if (surlArray == null) { + throw new InvalidArrayOfTSURLReturnStatusAttributeException(surlArray); + } + this.surlRetList = new ArrayList(Arrays.asList(surlArray)); + } + + public ArrayOfTSURLReturnStatus() { + + surlRetList = new ArrayList(); + } + + public ArrayList getArray() { + + return surlRetList; + } + + public TSURLReturnStatus getTSURLReturnStatus(int i) { + + return (TSURLReturnStatus) surlRetList.get(i); + } + + public void setTSURLReturnStatus(int index, TSURLReturnStatus surl) { + + surlRetList.set(index, surl); + } + + public void addTSurlReturnStatus(TSURLReturnStatus surl) { + + surlRetList.add(surl); + } + + public int size() { + + return surlRetList.size(); + } + + /** + * @param surl + * @throws IllegalArgumentException if null argument or not contained surl + */ + public void updateStatus(TSURLReturnStatus surlStatus, TReturnStatus newStatus) + 
throws IllegalArgumentException { + + if (surlStatus == null || newStatus == null) { + throw new IllegalArgumentException("Unable to update the status,null arguments: surlStatus=" + + surlStatus + " newStatus=" + newStatus); + } + int index = surlRetList.indexOf(surlStatus); + if (index < 0) { + throw new IllegalArgumentException( + "Unable to update the status,unknown TSURLReturnStatus" + surlStatus); + } + surlRetList.get(index).setStatus(newStatus); + } + + public void encode(Map outputParam, String name) { + + List> list = Lists.newArrayList(); + for (int i = 0; i < surlRetList.size(); i++) { + ((TSURLReturnStatus) surlRetList.get(i)).encode(list); + } + + outputParam.put(name, list); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("ArrayOfTSURLReturnStatus [surlRetList="); + builder.append(surlRetList); + builder.append("]"); + return builder.toString(); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = 1; + result = prime * result + ((surlRetList == null) ? 
0 : surlRetList.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + ArrayOfTSURLReturnStatus other = (ArrayOfTSURLReturnStatus) obj; + if (surlRetList == null) { + if (other.surlRetList != null) { + return false; + } + } else if (!surlRetList.equals(other.surlRetList)) { + return false; + } + return true; + } } diff --git a/src/main/java/it/grid/storm/srm/types/ArrayOfTSizeInBytes.java b/src/main/java/it/grid/storm/srm/types/ArrayOfTSizeInBytes.java index b5bcdbdf7..f8609b14e 100644 --- a/src/main/java/it/grid/storm/srm/types/ArrayOfTSizeInBytes.java +++ b/src/main/java/it/grid/storm/srm/types/ArrayOfTSizeInBytes.java @@ -25,75 +25,71 @@ */ package it.grid.storm.srm.types; -import it.grid.storm.common.types.SizeUnit; - import java.io.Serializable; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; -public class ArrayOfTSizeInBytes implements Serializable { +import com.google.common.collect.Lists; - private static final long serialVersionUID = -1987674620390240434L; +import it.grid.storm.common.types.SizeUnit; - public static final String PNAME_arrayOfExpectedFileSizes = "arrayOfExpectedFileSizes"; +public class ArrayOfTSizeInBytes implements Serializable { - private ArrayList sizeInBytesList; + private static final long serialVersionUID = -1987674620390240434L; - public ArrayOfTSizeInBytes() { + public static final String PNAME_arrayOfExpectedFileSizes = "arrayOfExpectedFileSizes"; - sizeInBytesList = new ArrayList(); - } + private List sizeInBytesList; - public static ArrayOfTSizeInBytes decode(Map inputParam, String fieldName) { + public ArrayOfTSizeInBytes() { - List inputList = null; - try { - inputList = Arrays.asList((Object[]) inputParam.get(fieldName)); - 
} catch (NullPointerException e) { - // log.warn("Empty SURL array found!"); - } + sizeInBytesList = Lists.newArrayList(); + } - if (inputList == null) - return null; + public static ArrayOfTSizeInBytes decode(Map inputParam, String fieldName) { - ArrayOfTSizeInBytes list = new ArrayOfTSizeInBytes(); - for (int i = 0; i < inputList.size(); i++) { - TSizeInBytes size = null; - String strLong = (String) inputList.get(i); - try { - size = TSizeInBytes.make(Long.parseLong(strLong), SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException e) { - return null; - } - list.addTSizeInBytes(size); - } - return list; - } + List inputList = null; + try { + inputList = Arrays.asList((Object[]) inputParam.get(fieldName)); + } catch (NullPointerException e) { + // log.warn("Empty SURL array found!"); + } - public Object[] getArray() { + if (inputList == null) + return null; - return sizeInBytesList.toArray(); - } + ArrayOfTSizeInBytes list = new ArrayOfTSizeInBytes(); + for (int i = 0; i < inputList.size(); i++) { + TSizeInBytes size = null; + String strLong = (String) inputList.get(i); + try { + size = TSizeInBytes.make(Long.parseLong(strLong), SizeUnit.BYTES); + } catch (InvalidTSizeAttributesException e) { + return null; + } + list.addTSizeInBytes(size); + } + return list; + } - public TSizeInBytes getTSizeInBytes(int i) { + public Object[] getArray() { - return (TSizeInBytes) sizeInBytesList.get(i); - } + return sizeInBytesList.toArray(); + } - public void setTSizeInBytes(int index, TSizeInBytes size) { + public TSizeInBytes getTSizeInBytes(int i) { - sizeInBytesList.set(index, size); - } + return (TSizeInBytes) sizeInBytesList.get(i); + } - public void addTSizeInBytes(TSizeInBytes size) { + public void addTSizeInBytes(TSizeInBytes size) { - sizeInBytesList.add(size); - } + sizeInBytesList.add(size); + } - public int size() { + public int size() { - return sizeInBytesList.size(); - } + return sizeInBytesList.size(); + } } diff --git 
a/src/main/java/it/grid/storm/srm/types/ArrayOfTSpaceToken.java b/src/main/java/it/grid/storm/srm/types/ArrayOfTSpaceToken.java index e4a4036ce..f6d18cdd0 100644 --- a/src/main/java/it/grid/storm/srm/types/ArrayOfTSpaceToken.java +++ b/src/main/java/it/grid/storm/srm/types/ArrayOfTSpaceToken.java @@ -25,106 +25,90 @@ package it.grid.storm.srm.types; -import java.util.ArrayList; +import java.io.Serializable; import java.util.Arrays; -import java.util.Hashtable; import java.util.List; import java.util.Map; -import java.util.Vector; - -import it.grid.storm.srm.types.TSpaceToken; -import java.io.Serializable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class ArrayOfTSpaceToken implements Serializable { +import com.google.common.collect.Lists; - private static Logger log = LoggerFactory.getLogger(ArrayOfTSpaceToken.class); - - public static final String PNAME_ARRAYOFSPACETOKENS = "arrayOfSpaceTokens"; - - ArrayList tokenList; +public class ArrayOfTSpaceToken implements Serializable { - /** - * Constructor that requires a String. If it is null, then an - * InvalidArrayOfTTSpaceTokenAttributeException is thrown. 
- */ - public ArrayOfTSpaceToken(TSpaceToken[] tokenArray) - throws InvalidArrayOfTSpaceTokenAttributeException { + /** + * + */ + private static final long serialVersionUID = 1L; - if (tokenArray == null) - throw new InvalidArrayOfTSpaceTokenAttributeException(tokenArray); - // FIXME this.tokenArray = tokenArray; - } + private static Logger log = LoggerFactory.getLogger(ArrayOfTSpaceToken.class); - public ArrayOfTSpaceToken() { + public static final String PNAME_ARRAYOFSPACETOKENS = "arrayOfSpaceTokens"; - tokenList = new ArrayList(); - } + private List tokenList = Lists.newArrayList(); - public static ArrayOfTSpaceToken decode(Map inputParam, String fieldName) - throws InvalidArrayOfTSpaceTokenAttributeException { + public static ArrayOfTSpaceToken decode(Map inputParam, String fieldName) + throws InvalidArrayOfTSpaceTokenAttributeException { - List tokensList = null; - try { - tokensList = Arrays.asList((Object[]) inputParam.get(fieldName)); - } catch (NullPointerException e) { - log.warn(""); - } - if (tokensList == null) - throw new InvalidArrayOfTSpaceTokenAttributeException(null); + List tokensList = null; + try { + tokensList = Arrays.asList((Object[]) inputParam.get(fieldName)); + } catch (NullPointerException e) { + log.warn(""); + } + if (tokensList == null) + throw new InvalidArrayOfTSpaceTokenAttributeException(null); - ArrayOfTSpaceToken arrayOfTSpaceTokens = new ArrayOfTSpaceToken(); + ArrayOfTSpaceToken arrayOfTSpaceTokens = new ArrayOfTSpaceToken(); - for (int i = 0; i < tokensList.size(); i++) { - TSpaceToken token = null; - try { - token = TSpaceToken.make((String) tokensList.get(i)); - } catch (InvalidTSpaceTokenAttributesException e) { - token = TSpaceToken.makeEmpty(); - } - arrayOfTSpaceTokens.addTSpaceToken(token); - } + for (int i = 0; i < tokensList.size(); i++) { + TSpaceToken token = null; + try { + token = TSpaceToken.make((String) tokensList.get(i)); + } catch (InvalidTSpaceTokenAttributesException e) { + token = 
TSpaceToken.makeEmpty(); + } + arrayOfTSpaceTokens.addTSpaceToken(token); + } - return arrayOfTSpaceTokens; - } + return arrayOfTSpaceTokens; + } - public TSpaceToken getTSpaceToken(int i) { + public TSpaceToken getTSpaceToken(int i) { - return (TSpaceToken) tokenList.get(i); - } + return tokenList.get(i); + } - public TSpaceToken[] getTSpaceTokenArray() { + public TSpaceToken[] getTSpaceTokenArray() { - TSpaceToken[] array = new TSpaceToken[0]; - return tokenList.toArray(array); - } + return tokenList.toArray(new TSpaceToken[0]); + } - public void addTSpaceToken(TSpaceToken token) { + public void addTSpaceToken(TSpaceToken token) { - tokenList.add(token); - } + tokenList.add(token); + } - public int size() { + public int size() { - return tokenList.size(); - } + return tokenList.size(); + } - /** - * Encode method, used to create a structured paramter representing this - * object, for FE communication. - * - * @param outputParam - * @param name - */ - public void encode(Map outputParam, String name) { + /** + * Encode method, used to create a structured paramter representing this object, for FE + * communication. 
+ * + * @param outputParam + * @param name + */ + public void encode(Map outputParam, String name) { - Vector vector = new Vector(); - for (int i = 0; i < tokenList.size(); i++) { - ((TSpaceToken) tokenList.get(i)).encode(vector); - } + List vector = Lists.newArrayList(); + for (int i = 0; i < tokenList.size(); i++) { + tokenList.get(i).encode(vector); + } - outputParam.put(name, vector); - } + outputParam.put(name, vector); + } } diff --git a/src/main/java/it/grid/storm/srm/types/InvalidArrayOfSURLsAttributeException.java b/src/main/java/it/grid/storm/srm/types/InvalidArrayOfSURLsAttributeException.java index a37852a38..c71b5755c 100644 --- a/src/main/java/it/grid/storm/srm/types/InvalidArrayOfSURLsAttributeException.java +++ b/src/main/java/it/grid/storm/srm/types/InvalidArrayOfSURLsAttributeException.java @@ -30,15 +30,20 @@ public class InvalidArrayOfSURLsAttributeException extends Exception { - private boolean nullArray = true; + /** + * + */ + private static final long serialVersionUID = 1L; - public InvalidArrayOfSURLsAttributeException(List array) { + private boolean nullArray = true; - nullArray = (array == null); - } + public InvalidArrayOfSURLsAttributeException(List array) { - public String toString() { + nullArray = (array == null); + } - return "surlList = " + nullArray; - } + public String toString() { + + return "surlList = " + nullArray; + } } diff --git a/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTExtraInfoAttributeException.java b/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTExtraInfoAttributeException.java index ddac6e3d5..0556fd423 100644 --- a/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTExtraInfoAttributeException.java +++ b/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTExtraInfoAttributeException.java @@ -17,25 +17,22 @@ package it.grid.storm.srm.types; -/** - * This class represents an Exception thrown when the constructor for SpaceToken - * is invoked with a null String. 
- * - */ - -import it.grid.storm.srm.types.TExtraInfo; - public class InvalidArrayOfTExtraInfoAttributeException extends Exception { - private boolean nullArray; + /** + * + */ + private static final long serialVersionUID = 1L; + + private boolean nullArray; - public InvalidArrayOfTExtraInfoAttributeException(Object[] infoArray) { + public InvalidArrayOfTExtraInfoAttributeException(Object[] infoArray) { - nullArray = infoArray == null; - } + nullArray = infoArray == null; + } - public String toString() { + public String toString() { - return "Invalid TExtraInfo[]: nullArray = " + nullArray; - } + return "Invalid TExtraInfo[]: nullArray = " + nullArray; + } } diff --git a/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTMetaDataPathDetailAttributeException.java b/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTMetaDataPathDetailAttributeException.java index dfeb2fceb..bd14773d6 100644 --- a/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTMetaDataPathDetailAttributeException.java +++ b/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTMetaDataPathDetailAttributeException.java @@ -17,27 +17,22 @@ package it.grid.storm.srm.types; -/** - * This class represents an Exception thrown when the constructor for SpaceToken - * is invoked with a null String. 
- * - */ - -import it.grid.storm.srm.types.TMetaDataPathDetail; +public class InvalidArrayOfTMetaDataPathDetailAttributeException extends Exception { -public class InvalidArrayOfTMetaDataPathDetailAttributeException extends - Exception { + /** + * + */ + private static final long serialVersionUID = 1L; - private boolean nullArray; + private boolean nullArray; - public InvalidArrayOfTMetaDataPathDetailAttributeException( - TMetaDataPathDetail[] metaDataArray) { + public InvalidArrayOfTMetaDataPathDetailAttributeException(TMetaDataPathDetail[] metaDataArray) { - nullArray = metaDataArray == null; - } + nullArray = metaDataArray == null; + } - public String toString() { + public String toString() { - return "Invalid TMetaDataPathDetail[]: nullArray = " + nullArray; - } + return "Invalid TMetaDataPathDetail[]: nullArray = " + nullArray; + } } diff --git a/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTMetaDataSpaceAttributeException.java b/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTMetaDataSpaceAttributeException.java index 7083c5ff3..3076ae89e 100644 --- a/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTMetaDataSpaceAttributeException.java +++ b/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTMetaDataSpaceAttributeException.java @@ -17,17 +17,14 @@ package it.grid.storm.srm.types; -/** - * This class represents an Exception thrown when the constructor for SpaceToken - * is invoked with a null String. 
- * - */ - -import it.grid.storm.srm.types.TMetaDataSpace; - public class InvalidArrayOfTMetaDataSpaceAttributeException extends Exception { - private boolean nullArray; + /** + * + */ + private static final long serialVersionUID = 1L; + + private boolean nullArray; public InvalidArrayOfTMetaDataSpaceAttributeException( TMetaDataSpace[] metaDataArray) { diff --git a/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTSpaceTokenAttributeException.java b/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTSpaceTokenAttributeException.java index faa6f6022..9a5f100dc 100644 --- a/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTSpaceTokenAttributeException.java +++ b/src/main/java/it/grid/storm/srm/types/InvalidArrayOfTSpaceTokenAttributeException.java @@ -17,25 +17,22 @@ package it.grid.storm.srm.types; -/** - * This class represents an Exception thrown when the constructor for SpaceToken - * is invoked with a null String. - * - */ - -import it.grid.storm.srm.types.TSpaceToken; - public class InvalidArrayOfTSpaceTokenAttributeException extends Exception { - private boolean nullArray; + /** + * + */ + private static final long serialVersionUID = 1L; + + private boolean nullArray; - public InvalidArrayOfTSpaceTokenAttributeException(TSpaceToken[] tokenArray) { + public InvalidArrayOfTSpaceTokenAttributeException(TSpaceToken[] tokenArray) { - nullArray = tokenArray == null; - } + nullArray = tokenArray == null; + } - public String toString() { + public String toString() { - return "Invalid SpaceToken[]: nullArray = " + nullArray; - } + return "Invalid SpaceToken[]: nullArray = " + nullArray; + } } diff --git a/src/main/java/it/grid/storm/srm/types/InvalidTSURLInfoAttributeException.java b/src/main/java/it/grid/storm/srm/types/InvalidTSURLInfoAttributeException.java index c0c37b4af..8465bc0ed 100644 --- a/src/main/java/it/grid/storm/srm/types/InvalidTSURLInfoAttributeException.java +++ b/src/main/java/it/grid/storm/srm/types/InvalidTSURLInfoAttributeException.java 
@@ -26,19 +26,22 @@ package it.grid.storm.srm.types; -import it.grid.storm.srm.types.TSURL; - public class InvalidTSURLInfoAttributeException extends Exception { - private boolean nullSurl = true; + /** + * + */ + private static final long serialVersionUID = 1L; + + private boolean nullSurl = true; - public InvalidTSURLInfoAttributeException(TSURL surl) { + public InvalidTSURLInfoAttributeException(TSURL surl) { - nullSurl = (surl == null); - } + nullSurl = (surl == null); + } - public String toString() { + public String toString() { - return "nullSurl = " + nullSurl; - } + return "nullSurl = " + nullSurl; + } } diff --git a/src/main/java/it/grid/storm/srm/types/InvalidTSURLLifetimeReturnStatusAttributeException.java b/src/main/java/it/grid/storm/srm/types/InvalidTSURLLifetimeReturnStatusAttributeException.java index faa682bd4..f044954d2 100644 --- a/src/main/java/it/grid/storm/srm/types/InvalidTSURLLifetimeReturnStatusAttributeException.java +++ b/src/main/java/it/grid/storm/srm/types/InvalidTSURLLifetimeReturnStatusAttributeException.java @@ -26,20 +26,22 @@ */ package it.grid.storm.srm.types; -import it.grid.storm.srm.types.TSURL; +public class InvalidTSURLLifetimeReturnStatusAttributeException extends Exception { -public class InvalidTSURLLifetimeReturnStatusAttributeException extends - Exception { + /** + * + */ + private static final long serialVersionUID = 1L; - private boolean nullSurl = true; + private boolean nullSurl = true; - public InvalidTSURLLifetimeReturnStatusAttributeException(TSURL surl) { + public InvalidTSURLLifetimeReturnStatusAttributeException(TSURL surl) { - nullSurl = (surl == null); - } + nullSurl = (surl == null); + } - public String toString() { + public String toString() { - return "nullSurl = " + nullSurl; - } + return "nullSurl = " + nullSurl; + } } diff --git a/src/main/java/it/grid/storm/srm/types/InvalidTSURLReturnStatusAttributeException.java b/src/main/java/it/grid/storm/srm/types/InvalidTSURLReturnStatusAttributeException.java 
index 0b645f60f..52449a2e4 100644 --- a/src/main/java/it/grid/storm/srm/types/InvalidTSURLReturnStatusAttributeException.java +++ b/src/main/java/it/grid/storm/srm/types/InvalidTSURLReturnStatusAttributeException.java @@ -26,19 +26,22 @@ package it.grid.storm.srm.types; -import it.grid.storm.srm.types.TSURL; - public class InvalidTSURLReturnStatusAttributeException extends Exception { - private boolean nullSurl = true; + /** + * + */ + private static final long serialVersionUID = 1L; + + private boolean nullSurl = true; - public InvalidTSURLReturnStatusAttributeException(TSURL surl) { + public InvalidTSURLReturnStatusAttributeException(TSURL surl) { - nullSurl = (surl == null); - } + nullSurl = (surl == null); + } - public String toString() { + public String toString() { - return "nullSurl = " + nullSurl; - } + return "nullSurl = " + nullSurl; + } } diff --git a/src/main/java/it/grid/storm/srm/types/TAccessLatency.java b/src/main/java/it/grid/storm/srm/types/TAccessLatency.java index 67ef75ecc..4b15fff4d 100644 --- a/src/main/java/it/grid/storm/srm/types/TAccessLatency.java +++ b/src/main/java/it/grid/storm/srm/types/TAccessLatency.java @@ -29,80 +29,75 @@ public class TAccessLatency { - public static String PNAME_accessLatency = "accessLatency"; - - private String accessLatency = null; - - public static final TAccessLatency ONLINE = new TAccessLatency("ONLINE"); - public static final TAccessLatency NEARLINE = new TAccessLatency("NEARLINE"); - public static final TAccessLatency EMPTY = new TAccessLatency("EMPTY"); - - private TAccessLatency(String accessLatency) { - - this.accessLatency = accessLatency; - } - - public final static TAccessLatency getTAccessLatency(int idx) { - - switch (idx) { - case 0: - return ONLINE; - case 1: - return NEARLINE; - default: - return EMPTY; - } - } - - /** - * decode() method creates a TAccessLatency object from the information - * contained into the structured parameter received from the FE. 
- * - * @param inputParam - * map structure - * @param fieldName - * field name - * @return - */ - public final static TAccessLatency decode(Map inputParam, String fieldName) { - - Integer val; - - val = (Integer) inputParam.get(fieldName); - if (val == null) - return EMPTY; - - return TAccessLatency.getTAccessLatency(val.intValue()); - } - - /** - * encode() method creates structured parameter representing this ogbject. It - * is passed to the FE. - * - * @param outputParam - * hashtable structure - * @param fieldName - * field name - */ - public void encode(Map outputParam, String fieldName) { - - Integer value = null; - - if (this.equals(ONLINE)) - value = Integer.valueOf(0); - if (this.equals(NEARLINE)) - value = Integer.valueOf(1); - - outputParam.put(fieldName, value); - } - - public String toString() { - - return accessLatency; - } - - public String getValue() { - - return accessLatency; - } + public static String PNAME_accessLatency = "accessLatency"; + + private String accessLatency = null; + + public static final TAccessLatency ONLINE = new TAccessLatency("ONLINE"); + public static final TAccessLatency NEARLINE = new TAccessLatency("NEARLINE"); + public static final TAccessLatency EMPTY = new TAccessLatency("EMPTY"); + + private TAccessLatency(String accessLatency) { + + this.accessLatency = accessLatency; + } + + public final static TAccessLatency getTAccessLatency(int idx) { + + switch (idx) { + case 0: + return ONLINE; + case 1: + return NEARLINE; + default: + return EMPTY; + } + } + + /** + * decode() method creates a TAccessLatency object from the information contained into the + * structured parameter received from the FE. 
+ * + * @param inputParam map structure + * @param fieldName field name + * @return + */ + public final static TAccessLatency decode(Map inputParam, String fieldName) { + + Integer val; + + val = (Integer) inputParam.get(fieldName); + if (val == null) + return EMPTY; + + return TAccessLatency.getTAccessLatency(val.intValue()); + } + + /** + * encode() method creates structured parameter representing this ogbject. It is passed to the FE. + * + * @param outputParam hashtable structure + * @param fieldName field name + */ + public void encode(Map outputParam, String fieldName) { + + Integer value = null; + + if (this.equals(ONLINE)) + value = Integer.valueOf(0); + if (this.equals(NEARLINE)) + value = Integer.valueOf(1); + + outputParam.put(fieldName, value); + } + + public String toString() { + + return accessLatency; + } + + public String getValue() { + + return accessLatency; + } } diff --git a/src/main/java/it/grid/storm/srm/types/TAccessPattern.java b/src/main/java/it/grid/storm/srm/types/TAccessPattern.java index b5ae131d3..1c21c44fa 100644 --- a/src/main/java/it/grid/storm/srm/types/TAccessPattern.java +++ b/src/main/java/it/grid/storm/srm/types/TAccessPattern.java @@ -29,80 +29,74 @@ public class TAccessPattern { - public static String PNAME_accessPattern = "accessPattern"; - - private String accessPattern = null; - - public static final TAccessPattern TRANSFER_MODE = new TAccessPattern( - "TRANSFER_MODE"), PROCESSING_MODE = new TAccessPattern("PROCESSING_MODE"), - EMPTY = new TAccessPattern("EMPTY"); - - private TAccessPattern(String accessPattern) { - - this.accessPattern = accessPattern; - } - - public final static TAccessPattern getTAccessPattern(int idx) { - - switch (idx) { - case 0: - return TRANSFER_MODE; - case 1: - return PROCESSING_MODE; - default: - return EMPTY; - } - } - - /** - * decode() method creates a TAccessPattern object from the inforation - * contained into the structured parameter received from the FE. 
- * - * @param inputParam - * map structure - * @param fieldName - * field name - * @return - */ - public final static TAccessPattern decode(Map inputParam, String fieldName) { - - Integer val; - - val = (Integer) inputParam.get(fieldName); - if (val == null) - return EMPTY; - - return TAccessPattern.getTAccessPattern(val.intValue()); - } - - /** - * encode() method creates structured parameter representing this ogbject. It - * is passed to the FE. - * - * @param outputParam - * map structure - * @param fieldName - * field name - */ - public void encode(Map outputParam, String fieldName) { - - Integer value = null; - - if (this.equals(TAccessPattern.TRANSFER_MODE)) - value = Integer.valueOf(0); - if (this.equals(TAccessPattern.PROCESSING_MODE)) - value = Integer.valueOf(1); - - outputParam.put(fieldName, value); - } - - public String toString() { - - return accessPattern; - } - - public String getValue() { - - return accessPattern; - } + public static String PNAME_accessPattern = "accessPattern"; + + private String accessPattern = null; + + public static final TAccessPattern TRANSFER_MODE = new TAccessPattern("TRANSFER_MODE"), + PROCESSING_MODE = new TAccessPattern("PROCESSING_MODE"), EMPTY = new TAccessPattern("EMPTY"); + + private TAccessPattern(String accessPattern) { + + this.accessPattern = accessPattern; + } + + public final static TAccessPattern getTAccessPattern(int idx) { + + switch (idx) { + case 0: + return TRANSFER_MODE; + case 1: + return PROCESSING_MODE; + default: + return EMPTY; + } + } + + /** + * decode() method creates a TAccessPattern object from the inforation contained into the + * structured parameter received from the FE. 
+ * + * @param inputParam map structure + * @param fieldName field name + * @return + */ + public final static TAccessPattern decode(Map inputParam, String fieldName) { + + Integer val; + + val = (Integer) inputParam.get(fieldName); + if (val == null) + return EMPTY; + + return TAccessPattern.getTAccessPattern(val.intValue()); + } + + /** + * encode() method creates structured parameter representing this object. It is passed to the FE. + * + * @param outputParam map structure + * @param fieldName field name + */ + public void encode(Map outputParam, String fieldName) { + + Integer value = null; + + if (this.equals(TAccessPattern.TRANSFER_MODE)) + value = Integer.valueOf(0); + if (this.equals(TAccessPattern.PROCESSING_MODE)) + value = Integer.valueOf(1); + + outputParam.put(fieldName, value); + } + + public String toString() { + + return accessPattern; + } + + public String getValue() { + + return accessPattern; + } } diff --git a/src/main/java/it/grid/storm/srm/types/TCheckSumType.java b/src/main/java/it/grid/storm/srm/types/TCheckSumType.java index 93db0c002..7a372a5ea 100644 --- a/src/main/java/it/grid/storm/srm/types/TCheckSumType.java +++ b/src/main/java/it/grid/storm/srm/types/TCheckSumType.java @@ -33,28 +33,28 @@ */ public class TCheckSumType { - public static String PNAME_CHECKSUMTYPE = "checkSumType"; + public static String PNAME_CHECKSUMTYPE = "checkSumType"; - private String chkType = null; + private String chkType = null; - public TCheckSumType(String chkType) { + public TCheckSumType(String chkType) { - this.chkType = chkType; - } + this.chkType = chkType; + } - @Override - public String toString() { + @Override + public String toString() { - return chkType.toString(); - } + return chkType.toString(); + } - public String getValue() { + public String getValue() { - return chkType.toString(); - } + return chkType.toString(); + } - public void encode(Map param, String name) { + public void encode(Map param, String name) { - param.put(name, 
this.toString()); - } + param.put(name, this.toString()); + } }; diff --git a/src/main/java/it/grid/storm/srm/types/TCheckSumValue.java b/src/main/java/it/grid/storm/srm/types/TCheckSumValue.java index 825401bae..ba151220f 100644 --- a/src/main/java/it/grid/storm/srm/types/TCheckSumValue.java +++ b/src/main/java/it/grid/storm/srm/types/TCheckSumValue.java @@ -17,7 +17,6 @@ package it.grid.storm.srm.types; -import java.util.Hashtable; import java.util.Map; /** @@ -34,28 +33,27 @@ */ public class TCheckSumValue { - private String value = null; + private String value = null; - public static String PNAME_CHECKSUMVALUE = "checkSumValue"; + public static String PNAME_CHECKSUMVALUE = "checkSumValue"; - // TO Complete wut Exception if Strin specified == null - public TCheckSumValue(String value) { + public TCheckSumValue(String value) { - this.value = value; - } + this.value = value; + } - public String toString() { + public String toString() { - return value; - } + return value; + } - public String getValue() { + public String getValue() { - return value; - } + return value; + } - public void encode(Map param, String name) { + public void encode(Map param, String name) { - param.put(name, this.toString()); - } + param.put(name, this.toString()); + } } diff --git a/src/main/java/it/grid/storm/srm/types/TConnectionType.java b/src/main/java/it/grid/storm/srm/types/TConnectionType.java index d03e8acf1..294cc6f2c 100644 --- a/src/main/java/it/grid/storm/srm/types/TConnectionType.java +++ b/src/main/java/it/grid/storm/srm/types/TConnectionType.java @@ -25,61 +25,60 @@ */ package it.grid.storm.srm.types; -import java.util.Hashtable; import java.util.Map; public class TConnectionType { - public static String PNAME_connectionType = "connectionType"; - private String connectionType = null; + public static String PNAME_connectionType = "connectionType"; + private String connectionType = null; - public static final TConnectionType WAN = new TConnectionType("WAN"), - LAN = new 
TConnectionType("LAN"), EMPTY = new TConnectionType("EMPTY"); + public static final TConnectionType WAN = new TConnectionType("WAN"), + LAN = new TConnectionType("LAN"), EMPTY = new TConnectionType("EMPTY"); - private TConnectionType(String connectionType) { + private TConnectionType(String connectionType) { - this.connectionType = connectionType; - } + this.connectionType = connectionType; + } - public final static TConnectionType getTConnectionType(int idx) { + public final static TConnectionType getTConnectionType(int idx) { - switch (idx) { - case 0: - return WAN; - case 1: - return LAN; - default: - return EMPTY; - } - } + switch (idx) { + case 0: + return WAN; + case 1: + return LAN; + default: + return EMPTY; + } + } - public final static TConnectionType decode(Map inputParam, String fieldName) { + public final static TConnectionType decode(Map inputParam, String fieldName) { - Integer val; + Integer val; - val = (Integer) inputParam.get(fieldName); - if (val == null) - return EMPTY; + val = (Integer) inputParam.get(fieldName); + if (val == null) + return EMPTY; - return TConnectionType.getTConnectionType(val.intValue()); - } + return TConnectionType.getTConnectionType(val.intValue()); + } - public int toInt(TConnectionType conType) { + public int toInt(TConnectionType conType) { - if (conType.equals(WAN)) - return 0; - if (conType.equals(LAN)) - return 1; - return 2; - } + if (conType.equals(WAN)) + return 0; + if (conType.equals(LAN)) + return 1; + return 2; + } - public String toString() { + public String toString() { - return connectionType; - } + return connectionType; + } - public String getValue() { + public String getValue() { - return connectionType; - } + return connectionType; + } } diff --git a/src/main/java/it/grid/storm/srm/types/TFileLocality.java b/src/main/java/it/grid/storm/srm/types/TFileLocality.java index 884f67e71..de7bf0aa0 100644 --- a/src/main/java/it/grid/storm/srm/types/TFileLocality.java +++ 
b/src/main/java/it/grid/storm/srm/types/TFileLocality.java @@ -29,132 +29,124 @@ */ public class TFileLocality { - public static String PNAME_FILELOCALITY = "fileLocality"; - - public static final TFileLocality ONLINE = new TFileLocality("ONLINE"); - public static final TFileLocality NEARLINE = new TFileLocality("NEARLINE"); - public static final TFileLocality ONLINE_AND_NEARLINE = new TFileLocality( - "ONLINE_AND_NEARLINE"); - public static final TFileLocality LOST = new TFileLocality("LOST"); - public static final TFileLocality NONE = new TFileLocality("NONE"); - public static final TFileLocality UNAVAILABLE = new TFileLocality( - "UNAVAILABLE"); - public static final TFileLocality EMPTY = new TFileLocality(""); - - private String fileLocality = null; - - private TFileLocality(String fileLoc) { - - this.fileLocality = fileLoc; - } - - public String toString() { - - return fileLocality; - } - - public String getValue() { - - return fileLocality; - } - - /** - * Facility method to obtain a TFileStorageType object given its String - * representation. If an invalid String is supplied, then an EMPTY - * TFileStorageType is returned. 
- */ - public static TFileLocality getTFileLocality(String loc) { - - if (loc.toLowerCase().replaceAll(" ", "") - .equals(ONLINE.getValue().toLowerCase())) { - return ONLINE; - } else if (loc.toLowerCase().replaceAll(" ", "") - .equals(NEARLINE.getValue().toLowerCase())) { - return NEARLINE; - } else if (loc.toLowerCase().replaceAll(" ", "") - .equals(ONLINE_AND_NEARLINE.getValue().toLowerCase())) { - return ONLINE_AND_NEARLINE; - } else if (loc.toLowerCase().replaceAll(" ", "") - .equals(LOST.getValue().toLowerCase())) { - return LOST; - } else if (loc.toLowerCase().replaceAll(" ", "") - .equals(NONE.getValue().toLowerCase())) { - return NONE; - } else if (loc.toLowerCase().replaceAll(" ", "") - .equals(UNAVAILABLE.getValue().toLowerCase())) { - return UNAVAILABLE; - } else { - return EMPTY; - } - } - - /** - * Facility method to obtain a TFileStorageType object given its int - * representation. If an invalid String is supplied, then an EMPTY - * TFileStorageType is returned. - */ - public static TFileLocality getTFileLocality(int loc) { - - switch (loc) { - case 0: - return ONLINE; - case 1: - return NEARLINE; - case 2: - return ONLINE_AND_NEARLINE; - case 3: - return LOST; - case 4: - return NONE; - case 5: - return UNAVAILABLE; - default: - return EMPTY; - } - } - - /** - * Decode method, use to create a TFileLocaliy object from the information - * contained into a structure parametet received from FE. - * - * @param inputParam - * @param name - * @return - */ - - public static TFileLocality decode(Map inputParam, String name) { - - Integer fileLoc = (Integer) inputParam.get(name); - if (fileLoc != null) - return TFileLocality.getTFileLocality(fileLoc.intValue()); - else - return TFileLocality.EMPTY; - } - - /** - * Encode method use to create a structured paramter that represents this - * object, used for pass information to FE. 
- * - * @param param - * @param name - */ - public void encode(Map param, String name) { - - Integer value = null; - if (this.equals(TFileLocality.ONLINE)) - value = Integer.valueOf(0); - if (this.equals(TFileLocality.NEARLINE)) - value = Integer.valueOf(1); - if (this.equals(TFileLocality.ONLINE_AND_NEARLINE)) - value = Integer.valueOf(2); - if (this.equals(TFileLocality.LOST)) - value = Integer.valueOf(3); - if (this.equals(TFileLocality.NONE)) - value = Integer.valueOf(4); - if (this.equals(TFileLocality.UNAVAILABLE)) - value = Integer.valueOf(5); - param.put(name, value); - - } + public static String PNAME_FILELOCALITY = "fileLocality"; + + public static final TFileLocality ONLINE = new TFileLocality("ONLINE"); + public static final TFileLocality NEARLINE = new TFileLocality("NEARLINE"); + public static final TFileLocality ONLINE_AND_NEARLINE = new TFileLocality("ONLINE_AND_NEARLINE"); + public static final TFileLocality LOST = new TFileLocality("LOST"); + public static final TFileLocality NONE = new TFileLocality("NONE"); + public static final TFileLocality UNAVAILABLE = new TFileLocality("UNAVAILABLE"); + public static final TFileLocality EMPTY = new TFileLocality(""); + + private String fileLocality = null; + + private TFileLocality(String fileLoc) { + + this.fileLocality = fileLoc; + } + + public String toString() { + + return fileLocality; + } + + public String getValue() { + + return fileLocality; + } + + /** + * Facility method to obtain a TFileStorageType object given its String representation. If an + * invalid String is supplied, then an EMPTY TFileStorageType is returned. 
+ */ + public static TFileLocality getTFileLocality(String loc) { + + if (loc.toLowerCase().replaceAll(" ", "").equals(ONLINE.getValue().toLowerCase())) { + return ONLINE; + } else if (loc.toLowerCase().replaceAll(" ", "").equals(NEARLINE.getValue().toLowerCase())) { + return NEARLINE; + } else if (loc.toLowerCase() + .replaceAll(" ", "") + .equals(ONLINE_AND_NEARLINE.getValue().toLowerCase())) { + return ONLINE_AND_NEARLINE; + } else if (loc.toLowerCase().replaceAll(" ", "").equals(LOST.getValue().toLowerCase())) { + return LOST; + } else if (loc.toLowerCase().replaceAll(" ", "").equals(NONE.getValue().toLowerCase())) { + return NONE; + } else if (loc.toLowerCase().replaceAll(" ", "").equals(UNAVAILABLE.getValue().toLowerCase())) { + return UNAVAILABLE; + } else { + return EMPTY; + } + } + + /** + * Facility method to obtain a TFileStorageType object given its int representation. If an invalid + * String is supplied, then an EMPTY TFileStorageType is returned. + */ + public static TFileLocality getTFileLocality(int loc) { + + switch (loc) { + case 0: + return ONLINE; + case 1: + return NEARLINE; + case 2: + return ONLINE_AND_NEARLINE; + case 3: + return LOST; + case 4: + return NONE; + case 5: + return UNAVAILABLE; + default: + return EMPTY; + } + } + + /** + * Decode method, use to create a TFileLocaliy object from the information contained into a + * structure parametet received from FE. + * + * @param inputParam + * @param name + * @return + */ + + public static TFileLocality decode(Map inputParam, String name) { + + Integer fileLoc = (Integer) inputParam.get(name); + if (fileLoc != null) + return TFileLocality.getTFileLocality(fileLoc.intValue()); + else + return TFileLocality.EMPTY; + } + + /** + * Encode method use to create a structured paramter that represents this object, used for pass + * information to FE. 
+ * + * @param param + * @param name + */ + public void encode(Map param, String name) { + + Integer value = null; + if (this.equals(TFileLocality.ONLINE)) + value = Integer.valueOf(0); + if (this.equals(TFileLocality.NEARLINE)) + value = Integer.valueOf(1); + if (this.equals(TFileLocality.ONLINE_AND_NEARLINE)) + value = Integer.valueOf(2); + if (this.equals(TFileLocality.LOST)) + value = Integer.valueOf(3); + if (this.equals(TFileLocality.NONE)) + value = Integer.valueOf(4); + if (this.equals(TFileLocality.UNAVAILABLE)) + value = Integer.valueOf(5); + param.put(name, value); + + } } diff --git a/src/main/java/it/grid/storm/srm/types/TFileStorageType.java b/src/main/java/it/grid/storm/srm/types/TFileStorageType.java index 925f23ba2..613534fd0 100644 --- a/src/main/java/it/grid/storm/srm/types/TFileStorageType.java +++ b/src/main/java/it/grid/storm/srm/types/TFileStorageType.java @@ -19,6 +19,8 @@ import java.util.Map; +import it.grid.storm.config.model.v2.StorageType; + /** * This class represents the TFileStorageType of an Srm request. * @@ -29,105 +31,115 @@ */ public class TFileStorageType { - private String fileType = null; - public final static String PNAME_FILESTORAGETYPE = "fileStorageType"; - - public static final TFileStorageType VOLATILE = new TFileStorageType( - "Volatile"); - public static final TFileStorageType DURABLE = new TFileStorageType("Durable"); - public static final TFileStorageType PERMANENT = new TFileStorageType( - "Permanent"); - public static final TFileStorageType EMPTY = new TFileStorageType("Empty"); - - private TFileStorageType(String fileType) { - - this.fileType = fileType; - } - - public String toString() { - - return fileType; - } - - public String getValue() { - - return fileType; - } - - /** - * Facility method to obtain a TFileStorageType object given its String - * representation. If an invalid String is supplied, then an EMPTY - * TFileStorageType is returned. 
- */ - public static TFileStorageType getTFileStorageType(String type) { - - if (type.toLowerCase().trim().equals(VOLATILE.getValue().toLowerCase())) { - return VOLATILE; - } - if (type.toLowerCase().trim().equals(PERMANENT.getValue().toLowerCase())) { - return PERMANENT; - } - if (type.toLowerCase().trim().equals(DURABLE.getValue().toLowerCase())) { - return DURABLE; - } else { - return EMPTY; - } - } - - /** - * Facility method to obtain a TFileStorageType object given its String - * representation. If an invalid String is supplied, then an EMPTY - * TFileStorageType is returned. - */ - public static TFileStorageType getTFileStorageType(int type) { - - switch (type) { - case 0: - return VOLATILE; - case 1: - return DURABLE; - case 2: - return PERMANENT; - default: - return EMPTY; - } - } - - /** - * Decode method use to create a TFileStorageType object from the information - * contain into structured parameter receive from FE. - * - * @param inputParam - * @param name - * @return - */ - - public static TFileStorageType decode(Map inputParam, String name) { - - Integer fileType = (Integer) inputParam.get(name); - if (fileType != null) - return TFileStorageType.getTFileStorageType(fileType.intValue()); - else - return TFileStorageType.EMPTY; - } - - /** - * Encode method use to Create a structured paramter that rapresents this - * object, used for pass information to FE. 
- * - * @param param - * @param name - */ - public void encode(Map param, String name) { - - Integer value = null; - if (this.equals(TFileStorageType.VOLATILE)) - value = Integer.valueOf(0); - if (this.equals(TFileStorageType.DURABLE)) - value = Integer.valueOf(1); - if (this.equals(TFileStorageType.PERMANENT)) - value = Integer.valueOf(2); - param.put(name, value); - } + private String fileType = null; + public final static String PNAME_FILESTORAGETYPE = "fileStorageType"; + + public static final TFileStorageType VOLATILE = new TFileStorageType("Volatile"); + public static final TFileStorageType DURABLE = new TFileStorageType("Durable"); + public static final TFileStorageType PERMANENT = new TFileStorageType("Permanent"); + public static final TFileStorageType EMPTY = new TFileStorageType("Empty"); + + private TFileStorageType(String fileType) { + + this.fileType = fileType; + } + + public String toString() { + + return fileType; + } + + public String getValue() { + + return fileType; + } + + /** + * Facility method to obtain a TFileStorageType object given its String representation. If an + * invalid String is supplied, then an EMPTY TFileStorageType is returned. + */ + public static TFileStorageType getTFileStorageType(String type) { + + if (type.toLowerCase().trim().equals(VOLATILE.getValue().toLowerCase())) { + return VOLATILE; + } + if (type.toLowerCase().trim().equals(PERMANENT.getValue().toLowerCase())) { + return PERMANENT; + } + if (type.toLowerCase().trim().equals(DURABLE.getValue().toLowerCase())) { + return DURABLE; + } else { + return EMPTY; + } + } + + public static TFileStorageType getTFileStorageType(StorageType type) { + + switch (type) { + case V: + return VOLATILE; + case P: + return PERMANENT; + case D: + return DURABLE; + default: + return EMPTY; + } + } + + /** + * Facility method to obtain a TFileStorageType object given its String representation. If an + * invalid String is supplied, then an EMPTY TFileStorageType is returned. 
+ */ + public static TFileStorageType getTFileStorageType(int type) { + + switch (type) { + case 0: + return VOLATILE; + case 1: + return DURABLE; + case 2: + return PERMANENT; + default: + return EMPTY; + } + } + + /** + * Decode method use to create a TFileStorageType object from the information contain into + * structured parameter receive from FE. + * + * @param inputParam + * @param name + * @return + */ + + public static TFileStorageType decode(Map inputParam, String name) { + + Integer fileType = (Integer) inputParam.get(name); + if (fileType != null) + return TFileStorageType.getTFileStorageType(fileType.intValue()); + else + return TFileStorageType.EMPTY; + } + + /** + * Encode method use to Create a structured parameter that represents this object, used for pass + * information to FE. + * + * @param param + * @param name + */ + public void encode(Map param, String name) { + + Integer value = null; + if (this.equals(TFileStorageType.VOLATILE)) + value = Integer.valueOf(0); + if (this.equals(TFileStorageType.DURABLE)) + value = Integer.valueOf(1); + if (this.equals(TFileStorageType.PERMANENT)) + value = Integer.valueOf(2); + param.put(name, value); + } } diff --git a/src/main/java/it/grid/storm/srm/types/TFileType.java b/src/main/java/it/grid/storm/srm/types/TFileType.java index 5af4eb6d0..f1da3de7b 100644 --- a/src/main/java/it/grid/storm/srm/types/TFileType.java +++ b/src/main/java/it/grid/storm/srm/types/TFileType.java @@ -30,55 +30,55 @@ public class TFileType { - private String fileType = null; - - public static String PNAME_TYPE = "type"; - public static final TFileType FILE = new TFileType("File"); - public static final TFileType DIRECTORY = new TFileType("Directory"); - public static final TFileType LINK = new TFileType("Link"); - - private TFileType(String fileType) { - - this.fileType = fileType; - } - - public String toString() { - - return fileType; - } - - public String getValue() { - - return fileType; - } - - public static TFileType 
getTFileType(String type) { - - if (type.equals(FILE.getValue())) - return FILE; - if (type.equals(DIRECTORY.getValue())) - return DIRECTORY; - if (type.equals(LINK.getValue())) - return LINK; - return null; - } - - /** - * Encode method use to represnts in a structured paramter this objects, for - * communication to FE component. - * - * @param param - * @param name - */ - public void encode(Map param, String name) { - - Integer value = null; - if (this.equals(TFileType.FILE)) - value = Integer.valueOf(0); - if (this.equals(TFileType.DIRECTORY)) - value = Integer.valueOf(1); - if (this.equals(TFileType.LINK)) - value = Integer.valueOf(2); - param.put(name, value); - } + private String fileType = null; + + public static String PNAME_TYPE = "type"; + public static final TFileType FILE = new TFileType("File"); + public static final TFileType DIRECTORY = new TFileType("Directory"); + public static final TFileType LINK = new TFileType("Link"); + + private TFileType(String fileType) { + + this.fileType = fileType; + } + + public String toString() { + + return fileType; + } + + public String getValue() { + + return fileType; + } + + public static TFileType getTFileType(String type) { + + if (type.equals(FILE.getValue())) + return FILE; + if (type.equals(DIRECTORY.getValue())) + return DIRECTORY; + if (type.equals(LINK.getValue())) + return LINK; + return null; + } + + /** + * Encode method use to represnts in a structured paramter this objects, for communication to FE + * component. 
+ * + * @param param + * @param name + */ + public void encode(Map param, String name) { + + Integer value = null; + if (this.equals(TFileType.FILE)) + value = Integer.valueOf(0); + if (this.equals(TFileType.DIRECTORY)) + value = Integer.valueOf(1); + if (this.equals(TFileType.LINK)) + value = Integer.valueOf(2); + param.put(name, value); + } } diff --git a/src/main/java/it/grid/storm/srm/types/TGroupPermission.java b/src/main/java/it/grid/storm/srm/types/TGroupPermission.java index f52d4ea0d..463cda468 100644 --- a/src/main/java/it/grid/storm/srm/types/TGroupPermission.java +++ b/src/main/java/it/grid/storm/srm/types/TGroupPermission.java @@ -26,57 +26,58 @@ package it.grid.storm.srm.types; -import java.util.HashMap; import java.util.Map; +import com.google.common.collect.Maps; + public class TGroupPermission { - private TGroupID groupID; - private TPermissionMode permMode; + private TGroupID groupID; + private TPermissionMode permMode; - public static String PNAME_GROUPPERMISSION = "groupPermission"; + public static String PNAME_GROUPPERMISSION = "groupPermission"; - public TGroupPermission(TGroupID groupID, TPermissionMode permMode) { + public TGroupPermission(TGroupID groupID, TPermissionMode permMode) { - this.groupID = groupID; - this.permMode = permMode; - } + this.groupID = groupID; + this.permMode = permMode; + } - public TGroupID getGroupID() { + public TGroupID getGroupID() { - return groupID; - } + return groupID; + } - public TPermissionMode getPermissionMode() { + public TPermissionMode getPermissionMode() { - return permMode; - } + return permMode; + } - public static TGroupPermission makeDirectoryDefault() { + public static TGroupPermission makeDirectoryDefault() { - return new TGroupPermission(new TGroupID("undef"), TPermissionMode.NONE); - } + return new TGroupPermission(new TGroupID("undef"), TPermissionMode.NONE); + } - public static TGroupPermission makeFileDefault() { + public static TGroupPermission makeFileDefault() { - return new 
TGroupPermission(new TGroupID("undef"), TPermissionMode.NONE); - } + return new TGroupPermission(new TGroupID("undef"), TPermissionMode.NONE); + } - /** - * Encode method use to provide a represnetation of this object into a - * structures paramter for communication to FE component. - * - * @param param - * @param name - */ - public void encode(Map param, String name) { + /** + * Encode method use to provide a representation of this object into a structures parameter for + * communication to FE component. + * + * @param param + * @param name + */ + public void encode(Map param, String name) { - Map paramStructure = new HashMap(); - if ((groupID != null) && (permMode != null)) { - groupID.encode(paramStructure, TGroupID.NAME_GROUPID); - permMode.encode(paramStructure, TPermissionMode.PNAME_MODE); - param.put(name, paramStructure); - } - } + Map paramStructure = Maps.newHashMap(); + if ((groupID != null) && (permMode != null)) { + groupID.encode(paramStructure, TGroupID.NAME_GROUPID); + permMode.encode(paramStructure, TPermissionMode.PNAME_MODE); + param.put(name, paramStructure); + } + } } diff --git a/src/main/java/it/grid/storm/srm/types/TLifeTimeInSeconds.java b/src/main/java/it/grid/storm/srm/types/TLifeTimeInSeconds.java index 932aca937..9e0f6a3c1 100644 --- a/src/main/java/it/grid/storm/srm/types/TLifeTimeInSeconds.java +++ b/src/main/java/it/grid/storm/srm/types/TLifeTimeInSeconds.java @@ -34,245 +34,237 @@ public class TLifeTimeInSeconds implements Serializable { - /** - * - */ - private static final long serialVersionUID = -8025723621535456819L; - private long time = -1; - private TimeUnit u = TimeUnit.EMPTY; - private boolean empty = true; - private static TLifeTimeInSeconds emptyTime = null; - private boolean infinite = false; - private static TLifeTimeInSeconds infiniteTime = null; - - public static String PNAME_LIFETIMEASSIGNED = "lifetimeAssigned"; - public static String PNAME_LIFETIMELEFT = "lifetimeLeft"; - public static String 
PNAME_DESIREDLIFETIMEOFRESERVEDSPACE = "desiredLifetimeOfReservedSpace"; - public static String PNAME_LIFETIMEOFRESERVEDSPACE = "lifetimeOfReservedSpace"; - public static String PNAME_FILELIFETIME = "fileLifetime"; - public static String PNAME_PINLIFETIME = "pinLifetime"; - - /** - * This constructor requires a long time representing the time in TimeUnit u. - */ - private TLifeTimeInSeconds(long time, TimeUnit u, boolean empty, - boolean infinite) { - - this.time = time; - this.u = u; - this.empty = empty; - this.infinite = infinite; - } - - /** - * Method that requires a long time representing the time in TimeUnit u; it - * throws an InvalidTLifeTimeAttributeException if u is null. A negative value - * for time, automatically results in an Infinite TLifeTimeInSeconds. - */ - public static TLifeTimeInSeconds make(long time, TimeUnit unit) - throws IllegalArgumentException { - - if (unit == null) { - throw new IllegalArgumentException( - "Unable to create the object, illegal arguments: time=" + time - + " unit=" + unit); - } - if (time < 0) { - return makeInfinite(); - } - return new TLifeTimeInSeconds(time, unit, false, false); - } - - /** - * Method that returns an Empty TLifeTimeInSeconds. - */ - public static TLifeTimeInSeconds makeEmpty() { - - if (emptyTime == null) - emptyTime = new TLifeTimeInSeconds(0, TimeUnit.EMPTY, true, false); - return emptyTime; - } - - /** - * Method that returns an Infinite TLifeTimeInSeconds. - */ - public static TLifeTimeInSeconds makeInfinite() { - - if (infiniteTime == null) - infiniteTime = new TLifeTimeInSeconds(-1, TimeUnit.EMPTY, false, true); - return infiniteTime; - } - - /** - * Method that returns true if this is an Empty TLifeTimeInSeconds. - */ - public boolean isEmpty() { - - return empty; - } - - /** - * Method that returns true if This is an Infinite TLifeTimeInSeconds. - */ - public boolean isInfinite() { - - return infinite; - } - - /** - * Method that returns a long value for this LifeTime. 
It returns -1 if This - * is an Empty or Infinite TLifeTimeInSeconds. - */ - public long value() { - - return time; - } - - /** - * Method that returns the TimeUnit for this LifeTime. It returns - * TimeUnit.EMPTY if This is an Empty or Infintie TLifeTimeInSeconds. - */ - public TimeUnit unit() { - - return u; - } - - /** - * Public static method that return this LifeTime value converted into the - * specified TimeUnit. It returns -1 if a null TimeUnit is passed, or if This - * is an Empty or Infinite TLifeTimeInSeconds. - */ - public double getTimeIn(TimeUnit u) { - - if ((!empty) && (!infinite) && (u != null)) { - Long l_time = Long.valueOf(time); - double result = l_time.doubleValue() - * (this.u.conversionFactor() / u.conversionFactor()); - return result; - } else - return -1; - } - - /** - * Returns the number of seconds remaining to reach startTimeInSeconds plus - * the value of this instance. - * - * @param startTimeInSeconds - * The starting time in seconds. - * @return Seconds remaining, zero otherwise. - */ - public TLifeTimeInSeconds timeLeft(long startTimeInSeconds) { - - if (empty) - return emptyTime; - long secondsLeft = this.time + startTimeInSeconds; - - Date currentDate = new Date(); - long currentTime = currentDate.getTime(); // current time in milliseconds - currentTime /= 1000; // current time in seconds - secondsLeft -= currentTime; - if (secondsLeft < 0) - secondsLeft = 0; - - TLifeTimeInSeconds timeLeft = null; - try { - timeLeft = TLifeTimeInSeconds.make(secondsLeft, TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - timeLeft = TLifeTimeInSeconds.makeEmpty(); - } - - return timeLeft; - } - - /** - * Returns the number of seconds remaining to reach startingDate plus the - * value of this instance. - * - * @param startingDate - * The starting date. - * @return Seconds remaining, zero otherwise. 
- */ - public TLifeTimeInSeconds timeLeft(Date startingDate) { - - if (empty || (startingDate == null)) - return emptyTime; - long startTimeInSeconds = startingDate.getTime() / 1000; - return timeLeft(startTimeInSeconds); - } - - /** - * Method that returns a TSizeInBytes object retrieving its value by the - * Hashtable used for communicating with the FE - */ - public static TLifeTimeInSeconds decode(Map inputParam, String fieldName) { - - String lifetime = (String) inputParam.get(fieldName); - - if (lifetime == null) - return TLifeTimeInSeconds.makeEmpty(); - long lifetimeLong = Long.parseLong(lifetime); - - try { - return TLifeTimeInSeconds.make(lifetimeLong, TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - return TLifeTimeInSeconds.makeEmpty(); - } - } - - /** - * Encode method, create a representation of this object into a structured - * paramter used for communication to FE component. - * - * @param param - * @param name - */ - public void encode(Map param, String name) { - - if (empty) - return; - String lifetimeString; - lifetimeString = String.valueOf(this.time); - param.put(name, lifetimeString); - } - - public String toString() { - - if (empty) - return "Empty TLifeTimeInSeconds!"; - if (infinite) - return "Infinite TLifeTimeInSeconds"; - return "" + time + " " + u; - } - - /** - * Beware that this equality will _not_ return true for the same quantity - * expressed in different units of measure! 
- */ - public boolean equals(Object o) { - - if (o == this) - return true; - if (!(o instanceof TLifeTimeInSeconds)) - return false; - TLifeTimeInSeconds et = (TLifeTimeInSeconds) o; - if ((this.empty) && (et.empty)) - return true; - if ((this.infinite) && (et.infinite)) - return true; - return ((this.time == et.time) && (this.u == et.u) && (this.empty == et.empty)); - } - - public int hashCode() { - - if (empty) - return -1; - if (infinite) - return -2; - int hash = 17; - hash = 37 * hash + Long.valueOf(time).hashCode(); - hash = 37 * hash + u.hashCode(); - return hash; - } + /** + * + */ + private static final long serialVersionUID = -8025723621535456819L; + private long time = -1; + private TimeUnit u = TimeUnit.EMPTY; + private boolean empty = true; + private static TLifeTimeInSeconds emptyTime = null; + private boolean infinite = false; + private static TLifeTimeInSeconds infiniteTime = null; + + public static String PNAME_LIFETIMEASSIGNED = "lifetimeAssigned"; + public static String PNAME_LIFETIMELEFT = "lifetimeLeft"; + public static String PNAME_DESIREDLIFETIMEOFRESERVEDSPACE = "desiredLifetimeOfReservedSpace"; + public static String PNAME_LIFETIMEOFRESERVEDSPACE = "lifetimeOfReservedSpace"; + public static String PNAME_FILELIFETIME = "fileLifetime"; + public static String PNAME_PINLIFETIME = "pinLifetime"; + + /** + * This constructor requires a long time representing the time in TimeUnit u. + */ + private TLifeTimeInSeconds(long time, TimeUnit u, boolean empty, boolean infinite) { + + this.time = time; + this.u = u; + this.empty = empty; + this.infinite = infinite; + } + + /** + * Method that requires a long time representing the time in TimeUnit u; it throws an + * InvalidTLifeTimeAttributeException if u is null. A negative value for time, automatically + * results in an Infinite TLifeTimeInSeconds. 
+ */ + public static TLifeTimeInSeconds make(long time, TimeUnit unit) throws IllegalArgumentException { + + if (unit == null) { + throw new IllegalArgumentException( + "Unable to create the object, illegal arguments: time=" + time + " unit=" + unit); + } + if (time < 0) { + return makeInfinite(); + } + return new TLifeTimeInSeconds(time, unit, false, false); + } + + /** + * Method that returns an Empty TLifeTimeInSeconds. + */ + public static TLifeTimeInSeconds makeEmpty() { + + if (emptyTime == null) + emptyTime = new TLifeTimeInSeconds(0, TimeUnit.EMPTY, true, false); + return emptyTime; + } + + /** + * Method that returns an Infinite TLifeTimeInSeconds. + */ + public static TLifeTimeInSeconds makeInfinite() { + + if (infiniteTime == null) + infiniteTime = new TLifeTimeInSeconds(-1, TimeUnit.EMPTY, false, true); + return infiniteTime; + } + + /** + * Method that returns true if this is an Empty TLifeTimeInSeconds. + */ + public boolean isEmpty() { + + return empty; + } + + /** + * Method that returns true if This is an Infinite TLifeTimeInSeconds. + */ + public boolean isInfinite() { + + return infinite; + } + + /** + * Method that returns a long value for this LifeTime. It returns -1 if This is an Empty or + * Infinite TLifeTimeInSeconds. + */ + public long value() { + + return time; + } + + /** + * Method that returns the TimeUnit for this LifeTime. It returns TimeUnit.EMPTY if This is an + * Empty or Infintie TLifeTimeInSeconds. + */ + public TimeUnit unit() { + + return u; + } + + /** + * Public static method that return this LifeTime value converted into the specified TimeUnit. It + * returns -1 if a null TimeUnit is passed, or if This is an Empty or Infinite TLifeTimeInSeconds. 
+ */ + public double getTimeIn(TimeUnit u) { + + if ((!empty) && (!infinite) && (u != null)) { + Long l_time = Long.valueOf(time); + double result = l_time.doubleValue() * (this.u.conversionFactor() / u.conversionFactor()); + return result; + } else + return -1; + } + + /** + * Returns the number of seconds remaining to reach startTimeInSeconds plus the value of this + * instance. + * + * @param startTimeInSeconds The starting time in seconds. + * @return Seconds remaining, zero otherwise. + */ + public TLifeTimeInSeconds timeLeft(long startTimeInSeconds) { + + if (empty) + return emptyTime; + long secondsLeft = this.time + startTimeInSeconds; + + Date currentDate = new Date(); + long currentTime = currentDate.getTime(); // current time in milliseconds + currentTime /= 1000; // current time in seconds + secondsLeft -= currentTime; + if (secondsLeft < 0) + secondsLeft = 0; + + TLifeTimeInSeconds timeLeft = null; + try { + timeLeft = TLifeTimeInSeconds.make(secondsLeft, TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + timeLeft = TLifeTimeInSeconds.makeEmpty(); + } + + return timeLeft; + } + + /** + * Returns the number of seconds remaining to reach startingDate plus the value of this instance. + * + * @param startingDate The starting date. + * @return Seconds remaining, zero otherwise. 
+ */ + public TLifeTimeInSeconds timeLeft(Date startingDate) { + + if (empty || (startingDate == null)) + return emptyTime; + long startTimeInSeconds = startingDate.getTime() / 1000; + return timeLeft(startTimeInSeconds); + } + + /** + * Method that returns a TSizeInBytes object retrieving its value by the Hashtable used for + * communicating with the FE + */ + public static TLifeTimeInSeconds decode(Map inputParam, String fieldName) { + + String lifetime = (String) inputParam.get(fieldName); + + if (lifetime == null) + return TLifeTimeInSeconds.makeEmpty(); + long lifetimeLong = Long.parseLong(lifetime); + + try { + return TLifeTimeInSeconds.make(lifetimeLong, TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + e.printStackTrace(); + return TLifeTimeInSeconds.makeEmpty(); + } + } + + /** + * Encode method, create a representation of this object into a structured paramter used for + * communication to FE component. + * + * @param param + * @param name + */ + public void encode(Map param, String name) { + + if (empty) { + return; + } + String lifetimeString; + lifetimeString = String.valueOf(this.time); + param.put(name, lifetimeString); + } + + public String toString() { + + if (empty) + return "Empty TLifeTimeInSeconds!"; + if (infinite) + return "Infinite TLifeTimeInSeconds"; + return "" + time + " " + u; + } + + /** + * Beware that this equality will _not_ return true for the same quantity expressed in different + * units of measure! 
+ */ + public boolean equals(Object o) { + + if (o == this) + return true; + if (!(o instanceof TLifeTimeInSeconds)) + return false; + TLifeTimeInSeconds et = (TLifeTimeInSeconds) o; + if ((this.empty) && (et.empty)) + return true; + if ((this.infinite) && (et.infinite)) + return true; + return ((this.time == et.time) && (this.u == et.u) && (this.empty == et.empty)); + } + + public int hashCode() { + + if (empty) + return -1; + if (infinite) + return -2; + int hash = 17; + hash = 37 * hash + Long.valueOf(time).hashCode(); + hash = 37 * hash + u.hashCode(); + return hash; + } } diff --git a/src/main/java/it/grid/storm/srm/types/TMetaDataPathDetail.java b/src/main/java/it/grid/storm/srm/types/TMetaDataPathDetail.java index 28b0fc615..537f247b2 100644 --- a/src/main/java/it/grid/storm/srm/types/TMetaDataPathDetail.java +++ b/src/main/java/it/grid/storm/srm/types/TMetaDataPathDetail.java @@ -33,454 +33,444 @@ package it.grid.storm.srm.types; -import it.grid.storm.common.types.StFN; - import java.text.SimpleDateFormat; import java.util.Date; -import java.util.HashMap; import java.util.List; import java.util.Map; +import com.google.common.collect.Maps; + +import it.grid.storm.common.types.StFN; + public class TMetaDataPathDetail { - private static final SimpleDateFormat dateFormat = new SimpleDateFormat( - "yyyy-MM-dd'T'HH:mm:ss"); - - private TSURL surl = null; - // Change in new srm 2.2 - private StFN stfn = null; - private TReturnStatus retStatus = null; - private TSizeInBytes size = null; - private Date createdAtTime = null; - private Date lastModificationAtTime = null; - private TFileStorageType fileStorageType = null; - private TRetentionPolicyInfo retentionPolicyInfo = null; - private TFileLocality fileLocality = null; - private ArrayOfTSpaceToken tokenArray = null; - private TFileType type = null; - private TLifeTimeInSeconds lifetimeAssigned = null; - private TLifeTimeInSeconds lifetimeLeft = null; - private TUserPermission ownerPermission = null; - private 
TGroupPermission groupPermission = null; - private TPermissionMode otherPermission = null; - private TCheckSumType checkSumType = null; - private TCheckSumValue checkSumValue = null; - private ArrayOfTMetaDataPathDetail arrayOfSubPaths = null; - - public TMetaDataPathDetail() { - - } - - /** - * Method that return Surl - */ - public TSURL getSurl() { - - return surl; - } - - /** - * Method that set Surl - */ - public void setSurl(TSURL surl) { - - this.surl = surl; - } - - /** - * Method that return StFN - */ - public StFN getStFN() { - - return stfn; - } - - /** - * Method that set StFN - */ - public void setStFN(StFN stfn) { - - this.stfn = stfn; - } - - /** - * Method that return Status - */ - public TReturnStatus getStatus() { - - return retStatus; - } - - /** - * Method that set Status. - */ - public void setStatus(TReturnStatus status) { - - this.retStatus = status; - } - - /** - * Method that Return Size - */ - public TSizeInBytes getSize() { - - return size; - } - - /** - * Method that set Size - */ - public void setSize(TSizeInBytes size) { - - this.size = size; - } - - /** - * Method that get LastModificationAtTime Value; - */ - public Date getModificationTime() { - - return lastModificationAtTime; - } - - /** - * Method that set CreatedAtTime Value - */ - public void setModificationTime(Date lastModificationAtTime) { - - this.lastModificationAtTime = lastModificationAtTime; - } - - /** - * Method that get CreatedAtTime Value; - */ - public Date getCreationTime() { - - return createdAtTime; - } - - /** - * Method that set CreatedAtTime Value - */ - public void setCreationTime(Date createdAtTime) { + private static final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); + + private TSURL surl = null; + // Change in new srm 2.2 + private StFN stfn = null; + private TReturnStatus retStatus = null; + private TSizeInBytes size = null; + private Date createdAtTime = null; + private Date lastModificationAtTime = null; + private 
TFileStorageType fileStorageType = null; + private TRetentionPolicyInfo retentionPolicyInfo = null; + private TFileLocality fileLocality = null; + private ArrayOfTSpaceToken tokenArray = null; + private TFileType type = null; + private TLifeTimeInSeconds lifetimeAssigned = null; + private TLifeTimeInSeconds lifetimeLeft = null; + private TUserPermission ownerPermission = null; + private TGroupPermission groupPermission = null; + private TPermissionMode otherPermission = null; + private TCheckSumType checkSumType = null; + private TCheckSumValue checkSumValue = null; + private ArrayOfTMetaDataPathDetail arrayOfSubPaths = null; + + public TMetaDataPathDetail() { + + } + + /** + * Method that return Surl + */ + public TSURL getSurl() { + + return surl; + } + + /** + * Method that set Surl + */ + public void setSurl(TSURL surl) { + + this.surl = surl; + } + + /** + * Method that return StFN + */ + public StFN getStFN() { + + return stfn; + } + + /** + * Method that set StFN + */ + public void setStFN(StFN stfn) { + + this.stfn = stfn; + } + + /** + * Method that return Status + */ + public TReturnStatus getStatus() { + + return retStatus; + } + + /** + * Method that set Status. 
+ */ + public void setStatus(TReturnStatus status) { + + this.retStatus = status; + } + + /** + * Method that Return Size + */ + public TSizeInBytes getSize() { + + return size; + } + + /** + * Method that set Size + */ + public void setSize(TSizeInBytes size) { + + this.size = size; + } + + /** + * Method that get LastModificationAtTime Value; + */ + public Date getModificationTime() { + + return lastModificationAtTime; + } + + /** + * Method that set CreatedAtTime Value + */ + public void setModificationTime(Date lastModificationAtTime) { + + this.lastModificationAtTime = lastModificationAtTime; + } + + /** + * Method that get CreatedAtTime Value; + */ + public Date getCreationTime() { + + return createdAtTime; + } + + /** + * Method that set CreatedAtTime Value + */ + public void setCreationTime(Date createdAtTime) { + + this.createdAtTime = createdAtTime; + } - this.createdAtTime = createdAtTime; - } + /** + * Method that return TFileStorageType; + */ + public TFileStorageType getFileStorageType() { - /** - * Method that return TFileStorageType; - */ - public TFileStorageType getFileStorageType() { + return fileStorageType; + } - return fileStorageType; - } + /** + * Method that set TFileStorageType + */ + public void setTFileStorageType(TFileStorageType type) { - /** - * Method that set TFileStorageType - */ - public void setTFileStorageType(TFileStorageType type) { + this.fileStorageType = type; + } - this.fileStorageType = type; - } + /** + * Method that return TRetentionPolicyInfo; + */ + public TRetentionPolicyInfo getTRetentionPolicyInfo() { - /** - * Method that return TRetentionPolicyInfo; - */ - public TRetentionPolicyInfo getTRetentionPolicyInfo() { + return this.retentionPolicyInfo; + } - return this.retentionPolicyInfo; - } + /** + * Method that set TRetentionPolicyInfo + */ + public void setTRetentionPolicyInfo(TRetentionPolicyInfo info) { - /** - * Method that set TRetentionPolicyInfo - */ - public void setTRetentionPolicyInfo(TRetentionPolicyInfo 
info) { + this.retentionPolicyInfo = info; + } - this.retentionPolicyInfo = info; - } + /** + * Method that return TFileLocality; + */ + public TFileLocality getTFileLocality() { - /** - * Method that return TFileLocality; - */ - public TFileLocality getTFileLocality() { + return this.fileLocality; + } - return this.fileLocality; - } + /** + * Method that set TFileLocality + */ + public void setTFileLocality(TFileLocality loc) { - /** - * Method that set TFileLocality - */ - public void setTFileLocality(TFileLocality loc) { + this.fileLocality = loc; + } - this.fileLocality = loc; - } + /** + * Method that return array of TSpaceToken; + */ + public ArrayOfTSpaceToken getArrayOfTSpaceToken() { - /** - * Method that return array of TSpaceToken; - */ - public ArrayOfTSpaceToken getArrayOfTSpaceToken() { + return this.tokenArray; + } - return this.tokenArray; - } + /** + * Method that set array of TSpaceToken + */ + public void setArrayOfTSpaceToken(ArrayOfTSpaceToken tokenArray) { - /** - * Method that set array of TSpaceToken - */ - public void setArrayOfTSpaceToken(ArrayOfTSpaceToken tokenArray) { + this.tokenArray = tokenArray; + } - this.tokenArray = tokenArray; - } + /** + * Method that return TFileType; + */ + public TFileType getFileType() { - /** - * Method that return TFileType; - */ - public TFileType getFileType() { + return type; + } - return type; - } + /** + * Method that set TFileType + */ + public void setFileType(TFileType type) { - /** - * Method that set TFileType - */ - public void setFileType(TFileType type) { + this.type = type; + } - this.type = type; - } + /** + * Method that GET lifetime assigned; + */ + public TLifeTimeInSeconds getLifetimeAssigned() { - /** - * Method that GET lifetime assigned; - */ - public TLifeTimeInSeconds getLifetimeAssigned() { + return lifetimeAssigned; + } - return lifetimeAssigned; - } + /** + * Method that set lifetime assigned + */ + public void setLifeTimeAssigned(TLifeTimeInSeconds lifetime) { - /** - * Method 
that set lifetime assigned - */ - public void setLifeTimeAssigned(TLifeTimeInSeconds lifetime) { + this.lifetimeAssigned = lifetime; + } - this.lifetimeAssigned = lifetime; - } + /** + * Method that GET lifetime LEFT; + */ - /** - * Method that GET lifetime LEFT; - */ + public TLifeTimeInSeconds getLifetimeLeft() { - public TLifeTimeInSeconds getLifetimeLeft() { + return lifetimeLeft; + } - return lifetimeLeft; - } + /** + * Method that set lifetime Left + */ + public void setLifetimeLeft(TLifeTimeInSeconds lifetime) { - /** - * Method that set lifetime Left - */ - public void setLifetimeLeft(TLifeTimeInSeconds lifetime) { + this.lifetimeLeft = lifetime; + } - this.lifetimeLeft = lifetime; - } + /** + * Method that set OwnerPermission + */ + public void setOwnerPermission(TUserPermission ownerPermission) { + + this.ownerPermission = ownerPermission; + } - /** - * Method that set OwnerPermission - */ - public void setOwnerPermission(TUserPermission ownerPermission) { - - this.ownerPermission = ownerPermission; - } - - /** - * Method that return OwnerPermission; - */ - public TUserPermission getOwnerPermission() { - - return ownerPermission; - } - - /** - * Method that set TGroupPermissionArray - */ - public void setGroupPermission(TGroupPermission groupP) { - - this.groupPermission = groupP; - } - - /** - * Method that get TGroupPermissionArray - */ - public TGroupPermission getGroupPermission() { - - return groupPermission; - } - - /** - * Method that set otherPermission - */ - public void setOtherPermission(TPermissionMode otherP) { - - this.otherPermission = otherP; - } - - /** - * Method that get otherPermission - */ - public TPermissionMode getUserPermissionArray() { - - return otherPermission; - } - - /** - * - * @param checkSumType - */ - public void setCheckSumType(TCheckSumType checkSumType) { - - this.checkSumType = checkSumType; - } - - /** - * Method that get CHECKSUMTYPE - */ - public TCheckSumType getCheckSumType() { - - return checkSumType; - } - - 
/** - * - * @param checkSumValue - */ - public void setCheckSumValue(TCheckSumValue checkSumValue) { - - this.checkSumValue = checkSumValue; - } - - /** - * Method that get CHECKSUMVALUE - */ - public TCheckSumValue getCheckSumValue() { - - return checkSumValue; - } - - /** - * Method that get TMetaDataPathDetails - * - * @TODO - */ - public ArrayOfTMetaDataPathDetail getArrayOfSubPaths() { - - return arrayOfSubPaths; - } - - public void setArrayOfSubPaths(ArrayOfTMetaDataPathDetail array) { - - arrayOfSubPaths = array; - } - - /** - * Encode method, used to encode a TMetaDataPathDetail object into a - * structured paramter (Hashtable), used for communicate to the FE component - * thourgh xmlrpc. - * - * @param param - * Hashtable that must contain structures results - * @param name - * name for the TMetaData field - */ - public void encode(List list) { - - Map param = new HashMap(); - - /* (1) StFN */ - if (this.stfn != null) { - this.stfn.encode(param, StFN.PNAME_PATH); - } - /* (2) TReturnStatus */ - if (this.retStatus != null) { - this.retStatus.encode(param, TReturnStatus.PNAME_STATUS); - } - /* (3) Size */ - if (this.size != null) { - this.size.encode(param, TSizeInBytes.PNAME_SIZE); - } - /* (4) createdAtTime */ - if (this.createdAtTime != null) { - param.put("createdAtTime", dateFormat.format(createdAtTime)); - } - /* (5) lastModificationTime */ - if (this.lastModificationAtTime != null) { - // param.put("lastModificationTime", lastModificationAtTime.toString()); - param.put("lastModificationTime", - dateFormat.format(lastModificationAtTime)); - } - /* (6) fileStorageType */ - if (this.fileStorageType != null) { - this.fileStorageType - .encode(param, TFileStorageType.PNAME_FILESTORAGETYPE); - } - /* (7) TRetentionPolicyInfo */ - if (this.retentionPolicyInfo != null) { - this.retentionPolicyInfo.encode(param, - TRetentionPolicyInfo.PNAME_retentionPolicyInfo); - } - /* (8) fileLocality */ - if (this.fileLocality != null) { - this.fileLocality.encode(param, 
TFileLocality.PNAME_FILELOCALITY); - } - /* (9) ArrayOfTSpaceToken */ - if (this.tokenArray != null) { - this.tokenArray - .encode(param, ArrayOfTSpaceToken.PNAME_ARRAYOFSPACETOKENS); - } - /* (10) TFileType */ - if (this.type != null) { - this.type.encode(param, TFileType.PNAME_TYPE); - } - /* (11) lifeTimeAssigned */ - if (this.lifetimeAssigned != null) { - this.lifetimeAssigned.encode(param, - TLifeTimeInSeconds.PNAME_LIFETIMEASSIGNED); - } - /* (12) lifeTimeLeft */ - if (this.lifetimeLeft != null) { - this.lifetimeLeft.encode(param, TLifeTimeInSeconds.PNAME_LIFETIMELEFT); - } - /* (13) TUserPermission ownerPermission */ - if (this.ownerPermission != null) { - this.ownerPermission.encode(param, TUserPermission.PNAME_OWNERPERMISSION); - } - /* (14) TGroupPermission groupPermission */ - if (this.groupPermission != null) { - this.groupPermission - .encode(param, TGroupPermission.PNAME_GROUPPERMISSION); - } - /* (15) TPermissionMode otherPermission */ - if (this.otherPermission != null) { - this.otherPermission.encode(param, TPermissionMode.PNAME_OTHERPERMISSION); - } - /* (16) TCheckSumType */ - if (this.checkSumType != null) { - this.checkSumType.encode(param, TCheckSumType.PNAME_CHECKSUMTYPE); - } - /* (17) TCheckSumValue */ - if (this.checkSumValue != null) { - this.checkSumValue.encode(param, TCheckSumValue.PNAME_CHECKSUMVALUE); - } - /* (18) ArrayOfTMetaDataPathDetails arrayOfSubPaths */ - if (this.arrayOfSubPaths != null) { - this.arrayOfSubPaths.encode(param, - ArrayOfTMetaDataPathDetail.PNAME_ARRAYOFSUBPATHS); - } - - // Add Hastable to global vector - list.add(param); - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(stfn.toString() + "\n"); - return sb.toString(); - } + /** + * Method that return OwnerPermission; + */ + public TUserPermission getOwnerPermission() { + + return ownerPermission; + } + + /** + * Method that set TGroupPermissionArray + */ + public void setGroupPermission(TGroupPermission 
groupP) { + + this.groupPermission = groupP; + } + + /** + * Method that get TGroupPermissionArray + */ + public TGroupPermission getGroupPermission() { + + return groupPermission; + } + + /** + * Method that set otherPermission + */ + public void setOtherPermission(TPermissionMode otherP) { + + this.otherPermission = otherP; + } + + /** + * Method that get otherPermission + */ + public TPermissionMode getUserPermissionArray() { + + return otherPermission; + } + + /** + * + * @param checkSumType + */ + public void setCheckSumType(TCheckSumType checkSumType) { + + this.checkSumType = checkSumType; + } + + /** + * Method that get CHECKSUMTYPE + */ + public TCheckSumType getCheckSumType() { + + return checkSumType; + } + + /** + * + * @param checkSumValue + */ + public void setCheckSumValue(TCheckSumValue checkSumValue) { + + this.checkSumValue = checkSumValue; + } + + /** + * Method that get CHECKSUMVALUE + */ + public TCheckSumValue getCheckSumValue() { + + return checkSumValue; + } + + /** + * Method that get TMetaDataPathDetails + * + * @TODO + */ + public ArrayOfTMetaDataPathDetail getArrayOfSubPaths() { + + return arrayOfSubPaths; + } + + public void setArrayOfSubPaths(ArrayOfTMetaDataPathDetail array) { + + arrayOfSubPaths = array; + } + + /** + * Encode method, used to encode a TMetaDataPathDetail object into a structured paramter + * (Hashtable), used for communicate to the FE component thourgh xmlrpc. 
+ * + * @param param Hashtable that must contain structures results + * @param name name for the TMetaData field + */ + public void encode(List list) { + + Map param = Maps.newHashMap(); + + /* (1) StFN */ + if (this.stfn != null) { + this.stfn.encode(param, StFN.PNAME_PATH); + } + /* (2) TReturnStatus */ + if (this.retStatus != null) { + this.retStatus.encode(param, TReturnStatus.PNAME_STATUS); + } + /* (3) Size */ + if (this.size != null) { + this.size.encode(param, TSizeInBytes.PNAME_SIZE); + } + /* (4) createdAtTime */ + if (this.createdAtTime != null) { + param.put("createdAtTime", dateFormat.format(createdAtTime)); + } + /* (5) lastModificationTime */ + if (this.lastModificationAtTime != null) { + // param.put("lastModificationTime", lastModificationAtTime.toString()); + param.put("lastModificationTime", dateFormat.format(lastModificationAtTime)); + } + /* (6) fileStorageType */ + if (this.fileStorageType != null) { + this.fileStorageType.encode(param, TFileStorageType.PNAME_FILESTORAGETYPE); + } + /* (7) TRetentionPolicyInfo */ + if (this.retentionPolicyInfo != null) { + this.retentionPolicyInfo.encode(param, TRetentionPolicyInfo.PNAME_retentionPolicyInfo); + } + /* (8) fileLocality */ + if (this.fileLocality != null) { + this.fileLocality.encode(param, TFileLocality.PNAME_FILELOCALITY); + } + /* (9) ArrayOfTSpaceToken */ + if (this.tokenArray != null) { + this.tokenArray.encode(param, ArrayOfTSpaceToken.PNAME_ARRAYOFSPACETOKENS); + } + /* (10) TFileType */ + if (this.type != null) { + this.type.encode(param, TFileType.PNAME_TYPE); + } + /* (11) lifeTimeAssigned */ + if (this.lifetimeAssigned != null) { + this.lifetimeAssigned.encode(param, TLifeTimeInSeconds.PNAME_LIFETIMEASSIGNED); + } + /* (12) lifeTimeLeft */ + if (this.lifetimeLeft != null) { + this.lifetimeLeft.encode(param, TLifeTimeInSeconds.PNAME_LIFETIMELEFT); + } + /* (13) TUserPermission ownerPermission */ + if (this.ownerPermission != null) { + this.ownerPermission.encode(param, 
TUserPermission.PNAME_OWNERPERMISSION); + } + /* (14) TGroupPermission groupPermission */ + if (this.groupPermission != null) { + this.groupPermission.encode(param, TGroupPermission.PNAME_GROUPPERMISSION); + } + /* (15) TPermissionMode otherPermission */ + if (this.otherPermission != null) { + this.otherPermission.encode(param, TPermissionMode.PNAME_OTHERPERMISSION); + } + /* (16) TCheckSumType */ + if (this.checkSumType != null) { + this.checkSumType.encode(param, TCheckSumType.PNAME_CHECKSUMTYPE); + } + /* (17) TCheckSumValue */ + if (this.checkSumValue != null) { + this.checkSumValue.encode(param, TCheckSumValue.PNAME_CHECKSUMVALUE); + } + /* (18) ArrayOfTMetaDataPathDetails arrayOfSubPaths */ + if (this.arrayOfSubPaths != null) { + this.arrayOfSubPaths.encode(param, ArrayOfTMetaDataPathDetail.PNAME_ARRAYOFSUBPATHS); + } + + // Add Hastable to global vector + list.add(param); + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(stfn.toString() + "\n"); + return sb.toString(); + } } diff --git a/src/main/java/it/grid/storm/srm/types/TMetaDataSpace.java b/src/main/java/it/grid/storm/srm/types/TMetaDataSpace.java index 9c08ed462..bd5fe54ac 100644 --- a/src/main/java/it/grid/storm/srm/types/TMetaDataSpace.java +++ b/src/main/java/it/grid/storm/srm/types/TMetaDataSpace.java @@ -29,325 +29,312 @@ package it.grid.storm.srm.types; -import it.grid.storm.space.SpaceHelper; -import it.grid.storm.space.StorageSpaceData; - import java.io.Serializable; -import java.util.HashMap; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.collect.Maps; + +import it.grid.storm.space.SpaceHelper; +import it.grid.storm.space.StorageSpaceData; + public class TMetaDataSpace implements Serializable { - private TSpaceType spaceType; - private TReturnStatus status = null; - private TSpaceToken spaceToken; - private TRetentionPolicyInfo retentionPolicyInfo; - private TUserID owner; - private 
TSizeInBytes totalSize; - private TSizeInBytes guaranteedSize; - private TSizeInBytes unusedSize; - private TLifeTimeInSeconds lifetimeAssigned = null; - private TLifeTimeInSeconds lifetimeLeft = null; - - private static final Logger log = LoggerFactory - .getLogger(TMetaDataSpace.class); - - public TMetaDataSpace() { - - this.spaceType = TSpaceType.EMPTY; - this.status = null; - this.spaceToken = TSpaceToken.makeEmpty(); - this.retentionPolicyInfo = null; - this.owner = TUserID.makeEmpty(); - this.totalSize = TSizeInBytes.makeEmpty(); - this.guaranteedSize = TSizeInBytes.makeEmpty(); - this.unusedSize = TSizeInBytes.makeEmpty(); - this.lifetimeAssigned = TLifeTimeInSeconds.makeEmpty(); - this.lifetimeLeft = TLifeTimeInSeconds.makeEmpty(); - } - - /** - * Constructor - * - * @param spaceType - * TSpaceType - * @param spaceToken - * TSpaceToken - * @param status - * TReturnStatus - * @param user - * TUserID - * @param totalSize - * TSizeInBytes - * @param guaranteedSize - * TSizeInBytes - * @param unusedSize - * TSizeInBytes - * @param lifetimeAssigned - * TLifeTimeInSeconds - * @param lifetimeLeft - * TLifeTimeInSeconds - * @throws InvalidTMetaDataSpaceAttributeException - */ - public TMetaDataSpace(TSpaceType spaceType, TSpaceToken spaceToken, - TReturnStatus status, TUserID user, TSizeInBytes totalSize, - TSizeInBytes guaranteedSize, TSizeInBytes unusedSize, - TLifeTimeInSeconds lifetimeAssigned, TLifeTimeInSeconds lifetimeLeft) - throws InvalidTMetaDataSpaceAttributeException { - - boolean ok = (spaceToken != null); - - if (!ok) { - throw new InvalidTMetaDataSpaceAttributeException(spaceToken); - } - - this.spaceType = spaceType; - this.spaceToken = spaceToken; - this.status = status; - this.owner = user; - this.totalSize = totalSize; - this.guaranteedSize = guaranteedSize; - this.unusedSize = unusedSize; - this.lifetimeAssigned = lifetimeAssigned; - this.lifetimeLeft = lifetimeLeft; - } - - /** - * Constructor with SpaceData returned by DAO. 
- * - * @param spaceData - * of type StorageSpaceData - * @throws InvalidTMetaDataSpaceAttributeException - * @throws InvalidTSizeAttributesException - */ - public TMetaDataSpace(StorageSpaceData spaceData) - throws InvalidTMetaDataSpaceAttributeException, - InvalidTSizeAttributesException { - - if (spaceData == null) { - log.warn("TMetaDataSpace built without SPACEDATA detail."); - this.spaceType = TSpaceType.EMPTY; - this.spaceToken = TSpaceToken.makeEmpty(); - this.status = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "Invalid space token"); - this.owner = TUserID.makeEmpty(); - this.totalSize = TSizeInBytes.makeEmpty(); - this.guaranteedSize = TSizeInBytes.makeEmpty(); - this.unusedSize = TSizeInBytes.makeEmpty(); - this.lifetimeAssigned = TLifeTimeInSeconds.makeEmpty(); - this.lifetimeLeft = TLifeTimeInSeconds.makeEmpty(); - } else { - boolean ok = (spaceData.getSpaceToken() != null); - if (!ok) { - log.warn("TMetaDataSpace built with SpaceData without Token.. !?"); - throw new InvalidTMetaDataSpaceAttributeException(spaceToken); - } - this.spaceType = spaceData.getSpaceType(); - this.spaceToken = spaceData.getSpaceToken(); - this.owner = spaceData.getUserID(); - this.totalSize = spaceData.getTotalSpaceSize(); - this.guaranteedSize = spaceData.getTotalGuaranteedSize(); - try { - if (SpaceHelper.isStorageArea(spaceData)) { - this.guaranteedSize = spaceData.getTotalGuaranteedSize(); - } else { - this.guaranteedSize = spaceData.getReservedSpaceSize(); - } - } catch (IllegalArgumentException e) { - // impossible - } - - this.unusedSize = spaceData.getFreeSpaceSize(); - this.lifetimeAssigned = spaceData.getLifeTime(); - if (this.lifetimeAssigned.isInfinite()) { - this.lifetimeLeft = TLifeTimeInSeconds.makeInfinite(); - } else { - this.lifetimeLeft = this.lifetimeAssigned.timeLeft(spaceData - .getCreationDate()); - } - if ((this.lifetimeLeft.value() == 0) - && (this.spaceType != TSpaceType.VOSPACE)) { - this.status = new 
TReturnStatus(TStatusCode.SRM_SPACE_LIFETIME_EXPIRED, - "Expired space lifetime"); - } else { - this.status = new TReturnStatus(TStatusCode.SRM_SUCCESS, - "Valid space token"); - } - } - } - - public static TMetaDataSpace makeEmpty() { - - return new TMetaDataSpace(); - } - - /** - * Method that returns SpaceType - */ - public TSpaceType getSpaceType() { - - return spaceType; - } - - /** - * Get TReturnStatus - */ - public TReturnStatus getStatus() { - - return status; - } - - /** - * Set TReturnStatus - */ - public void setStatus(TReturnStatus status) { - - this.status = status; - } - - /** - * Return Space Token; - */ - public TSpaceToken getSpaceToken() { - - return spaceToken; - } - - public void setSpaceToken(TSpaceToken token) { - - this.spaceToken = token; - } - - public void setSpaceType(TSpaceType type) { - - this.spaceType = type; - } - - public void setOwner(TUserID uid) { - - this.owner = uid; - } - - public void setTotalSize(TSizeInBytes tsize) { - - this.totalSize = tsize; - } - - public void setGuarSize(TSizeInBytes gsize) { - - this.guaranteedSize = gsize; - } - - public void setUnSize(TSizeInBytes usize) { - - this.unusedSize = usize; - } - - public void setLifeTime(TLifeTimeInSeconds time) { - - this.lifetimeAssigned = time; - } - - public void setLifeTimeLeft(TLifeTimeInSeconds time) { + /** + * + */ + private static final long serialVersionUID = 1L; + + private TSpaceType spaceType; + private TReturnStatus status = null; + private TSpaceToken spaceToken; + private TRetentionPolicyInfo retentionPolicyInfo; + private TUserID owner; + private TSizeInBytes totalSize; + private TSizeInBytes guaranteedSize; + private TSizeInBytes unusedSize; + private TLifeTimeInSeconds lifetimeAssigned = null; + private TLifeTimeInSeconds lifetimeLeft = null; + + private static final Logger log = LoggerFactory.getLogger(TMetaDataSpace.class); + + public TMetaDataSpace() { + + this.spaceType = TSpaceType.EMPTY; + this.status = null; + this.spaceToken = 
TSpaceToken.makeEmpty(); + this.retentionPolicyInfo = null; + this.owner = TUserID.makeEmpty(); + this.totalSize = TSizeInBytes.makeEmpty(); + this.guaranteedSize = TSizeInBytes.makeEmpty(); + this.unusedSize = TSizeInBytes.makeEmpty(); + this.lifetimeAssigned = TLifeTimeInSeconds.makeEmpty(); + this.lifetimeLeft = TLifeTimeInSeconds.makeEmpty(); + } + + /** + * Constructor + * + * @param spaceType TSpaceType + * @param spaceToken TSpaceToken + * @param status TReturnStatus + * @param user TUserID + * @param totalSize TSizeInBytes + * @param guaranteedSize TSizeInBytes + * @param unusedSize TSizeInBytes + * @param lifetimeAssigned TLifeTimeInSeconds + * @param lifetimeLeft TLifeTimeInSeconds + * @throws InvalidTMetaDataSpaceAttributeException + */ + public TMetaDataSpace(TSpaceType spaceType, TSpaceToken spaceToken, TReturnStatus status, + TUserID user, TSizeInBytes totalSize, TSizeInBytes guaranteedSize, TSizeInBytes unusedSize, + TLifeTimeInSeconds lifetimeAssigned, TLifeTimeInSeconds lifetimeLeft) + throws InvalidTMetaDataSpaceAttributeException { + + boolean ok = (spaceToken != null); + + if (!ok) { + throw new InvalidTMetaDataSpaceAttributeException(spaceToken); + } + + this.spaceType = spaceType; + this.spaceToken = spaceToken; + this.status = status; + this.owner = user; + this.totalSize = totalSize; + this.guaranteedSize = guaranteedSize; + this.unusedSize = unusedSize; + this.lifetimeAssigned = lifetimeAssigned; + this.lifetimeLeft = lifetimeLeft; + } + + /** + * Constructor with SpaceData returned by DAO. 
+ * + * @param spaceData of type StorageSpaceData + * @throws InvalidTMetaDataSpaceAttributeException + * @throws InvalidTSizeAttributesException + */ + public TMetaDataSpace(StorageSpaceData spaceData) + throws InvalidTMetaDataSpaceAttributeException, InvalidTSizeAttributesException { + + if (spaceData == null) { + log.warn("TMetaDataSpace built without SPACEDATA detail."); + this.spaceType = TSpaceType.EMPTY; + this.spaceToken = TSpaceToken.makeEmpty(); + this.status = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, "Invalid space token"); + this.owner = TUserID.makeEmpty(); + this.totalSize = TSizeInBytes.makeEmpty(); + this.guaranteedSize = TSizeInBytes.makeEmpty(); + this.unusedSize = TSizeInBytes.makeEmpty(); + this.lifetimeAssigned = TLifeTimeInSeconds.makeEmpty(); + this.lifetimeLeft = TLifeTimeInSeconds.makeEmpty(); + } else { + boolean ok = (spaceData.getSpaceToken() != null); + if (!ok) { + log.warn("TMetaDataSpace built with SpaceData without Token.. !?"); + throw new InvalidTMetaDataSpaceAttributeException(spaceToken); + } + this.spaceType = spaceData.getSpaceType(); + this.spaceToken = spaceData.getSpaceToken(); + this.owner = spaceData.getUserID(); + this.totalSize = spaceData.getTotalSpaceSize(); + this.guaranteedSize = spaceData.getTotalGuaranteedSize(); + try { + if (SpaceHelper.isStorageArea(spaceData)) { + this.guaranteedSize = spaceData.getTotalGuaranteedSize(); + } else { + this.guaranteedSize = spaceData.getReservedSpaceSize(); + } + } catch (IllegalArgumentException e) { + // impossible + } + + this.unusedSize = spaceData.getFreeSpaceSize(); + this.lifetimeAssigned = spaceData.getLifeTime(); + if (this.lifetimeAssigned.isInfinite()) { + this.lifetimeLeft = TLifeTimeInSeconds.makeInfinite(); + } else { + this.lifetimeLeft = this.lifetimeAssigned.timeLeft(spaceData.getCreationDate()); + } + if ((this.lifetimeLeft.value() == 0) && (this.spaceType != TSpaceType.VOSPACE)) { + this.status = + new 
TReturnStatus(TStatusCode.SRM_SPACE_LIFETIME_EXPIRED, "Expired space lifetime"); + } else { + this.status = new TReturnStatus(TStatusCode.SRM_SUCCESS, "Valid space token"); + } + } + } + + public static TMetaDataSpace makeEmpty() { + + return new TMetaDataSpace(); + } + + /** + * Method that returns SpaceType + */ + public TSpaceType getSpaceType() { + + return spaceType; + } + + /** + * Get TReturnStatus + */ + public TReturnStatus getStatus() { + + return status; + } + + /** + * Set TReturnStatus + */ + public void setStatus(TReturnStatus status) { + + this.status = status; + } + + /** + * Return Space Token; + */ + public TSpaceToken getSpaceToken() { + + return spaceToken; + } + + public void setSpaceToken(TSpaceToken token) { + + this.spaceToken = token; + } + + public void setSpaceType(TSpaceType type) { + + this.spaceType = type; + } + + public void setOwner(TUserID uid) { + + this.owner = uid; + } + + public void setTotalSize(TSizeInBytes tsize) { + + this.totalSize = tsize; + } + + public void setGuarSize(TSizeInBytes gsize) { + + this.guaranteedSize = gsize; + } + + public void setUnSize(TSizeInBytes usize) { + + this.unusedSize = usize; + } + + public void setLifeTime(TLifeTimeInSeconds time) { + + this.lifetimeAssigned = time; + } + + public void setLifeTimeLeft(TLifeTimeInSeconds time) { - this.lifetimeLeft = time; - } + this.lifetimeLeft = time; + } - /** - * Return retentionPolicyInfo - */ - public TRetentionPolicyInfo getRetentionPolicyInfo() { + /** + * Return retentionPolicyInfo + */ + public TRetentionPolicyInfo getRetentionPolicyInfo() { - return retentionPolicyInfo; - } + return retentionPolicyInfo; + } - public void setRetentionPolicyInfo(TRetentionPolicyInfo retentionPolicyInfo) { + public void setRetentionPolicyInfo(TRetentionPolicyInfo retentionPolicyInfo) { - this.retentionPolicyInfo = retentionPolicyInfo; - } + this.retentionPolicyInfo = retentionPolicyInfo; + } - /** - * Return User Identifier; - */ - public TUserID getUserID() { + /** + 
* Return User Identifier; + */ + public TUserID getUserID() { - return owner; - } + return owner; + } - /** - * Return TotalSize; - */ - public TSizeInBytes getTotalSize() { + /** + * Return TotalSize; + */ + public TSizeInBytes getTotalSize() { - return totalSize; - } + return totalSize; + } - /** - * Return Guaranteed Size; - */ - public TSizeInBytes getGuaranteedSize() { + /** + * Return Guaranteed Size; + */ + public TSizeInBytes getGuaranteedSize() { - return guaranteedSize; - } + return guaranteedSize; + } - /** - * Return Unused Size. - */ - public TSizeInBytes getUnusedSize() { + /** + * Return Unused Size. + */ + public TSizeInBytes getUnusedSize() { - return unusedSize; - } + return unusedSize; + } - /** - * Return Lifetime Assigned. - */ - public TLifeTimeInSeconds getLifeTimeAssigned() { + /** + * Return Lifetime Assigned. + */ + public TLifeTimeInSeconds getLifeTimeAssigned() { - return lifetimeAssigned; + return lifetimeAssigned; - } + } - /** - * Return LifeTime Left - */ - public TLifeTimeInSeconds getLifeTimeLeft() { + /** + * Return LifeTime Left + */ + public TLifeTimeInSeconds getLifeTimeLeft() { - return lifetimeLeft; - } + return lifetimeLeft; + } - /** - * Method used to encode value for FE communication. - */ - public void encode(Map outputParam, String fieldName) { + /** + * Method used to encode value for FE communication. + */ + public void encode(Map outputParam, String fieldName) { - Map metaDataSpace = new HashMap(); + Map metaDataSpace = Maps.newHashMap(); - this.encode(metaDataSpace); - outputParam.put(fieldName, metaDataSpace); - } + this.encode(metaDataSpace); + outputParam.put(fieldName, metaDataSpace); + } - /** - * Method used to encode value for FE communication. - */ - public void encode(Map metaDataSpace) { + /** + * Method used to encode value for FE communication. 
+ */ + public void encode(Map metaDataSpace) { - spaceToken.encode(metaDataSpace, TSpaceToken.PNAME_SPACETOKEN); - if (status != null) { - status.encode(metaDataSpace, TReturnStatus.PNAME_STATUS); - } - if (retentionPolicyInfo != null) { - retentionPolicyInfo.encode(metaDataSpace, - TRetentionPolicyInfo.PNAME_retentionPolicyInfo); - } - owner.encode(metaDataSpace, TUserID.PNAME_OWNER); - totalSize.encode(metaDataSpace, TSizeInBytes.PNAME_TOTALSIZE); - guaranteedSize.encode(metaDataSpace, TSizeInBytes.PNAME_GUARANTEEDSIZE); - unusedSize.encode(metaDataSpace, TSizeInBytes.PNAME_UNUSEDSIZE); - lifetimeAssigned.encode(metaDataSpace, - TLifeTimeInSeconds.PNAME_LIFETIMEASSIGNED); - lifetimeLeft.encode(metaDataSpace, TLifeTimeInSeconds.PNAME_LIFETIMELEFT); - } + spaceToken.encode(metaDataSpace, TSpaceToken.PNAME_SPACETOKEN); + if (status != null) { + status.encode(metaDataSpace, TReturnStatus.PNAME_STATUS); + } + if (retentionPolicyInfo != null) { + retentionPolicyInfo.encode(metaDataSpace, TRetentionPolicyInfo.PNAME_retentionPolicyInfo); + } + owner.encode(metaDataSpace, TUserID.PNAME_OWNER); + totalSize.encode(metaDataSpace, TSizeInBytes.PNAME_TOTALSIZE); + guaranteedSize.encode(metaDataSpace, TSizeInBytes.PNAME_GUARANTEEDSIZE); + unusedSize.encode(metaDataSpace, TSizeInBytes.PNAME_UNUSEDSIZE); + lifetimeAssigned.encode(metaDataSpace, TLifeTimeInSeconds.PNAME_LIFETIMEASSIGNED); + lifetimeLeft.encode(metaDataSpace, TLifeTimeInSeconds.PNAME_LIFETIMELEFT); + } } diff --git a/src/main/java/it/grid/storm/srm/types/TOverwriteMode.java b/src/main/java/it/grid/storm/srm/types/TOverwriteMode.java index 11210033b..95c0636bc 100644 --- a/src/main/java/it/grid/storm/srm/types/TOverwriteMode.java +++ b/src/main/java/it/grid/storm/srm/types/TOverwriteMode.java @@ -44,28 +44,4 @@ public String getValue() { return mode; } - - /** - * @param mode - * @return - * @throws IllegalArgumentException - */ - public static TOverwriteMode getTOverwriteMode(String mode) - throws 
IllegalArgumentException { - - if (mode == null) { - throw new IllegalArgumentException("Received null mode parameter"); - } - if (mode.equals(EMPTY.getValue())) - return EMPTY; - if (mode.equals(NEVER.getValue())) - return NEVER; - if (mode.equals(ALWAYS.getValue())) - return ALWAYS; - if (mode.equals(WHENFILESAREDIFFERENT.getValue())) - return WHENFILESAREDIFFERENT; - throw new IllegalArgumentException( - "No matching TOverwriteMode for String \'" + mode + "\'"); - } - } diff --git a/src/main/java/it/grid/storm/srm/types/TPermissionMode.java b/src/main/java/it/grid/storm/srm/types/TPermissionMode.java index 3961b0b4d..dd9ad4f58 100644 --- a/src/main/java/it/grid/storm/srm/types/TPermissionMode.java +++ b/src/main/java/it/grid/storm/srm/types/TPermissionMode.java @@ -17,14 +17,12 @@ package it.grid.storm.srm.types; -import java.util.Hashtable; import java.util.Map; import it.grid.storm.filesystem.FilesystemPermission; /** - * This class represents the TPermissionMode of a File or Space Area managed by - * Srm. + * This class represents the TPermissionMode of a File or Space Area managed by Srm. 
* * @author Magnoni Luca * @author CNAF - INFN Bologna @@ -34,123 +32,121 @@ public class TPermissionMode { - public static String PNAME_OTHERPERMISSION = "otherPermission"; - public static String PNAME_MODE = "mode"; - - private String mode = null; - - public static final TPermissionMode NONE = new TPermissionMode("None"); - public static final TPermissionMode X = new TPermissionMode("X"); - public static final TPermissionMode W = new TPermissionMode("W"); - public static final TPermissionMode WX = new TPermissionMode("WX"); - public static final TPermissionMode R = new TPermissionMode("R"); - public static final TPermissionMode RX = new TPermissionMode("RX"); - public static final TPermissionMode RW = new TPermissionMode("RW"); - public static final TPermissionMode RWX = new TPermissionMode("RWX"); - - private TPermissionMode(String mode) { - - this.mode = mode; - } - - public String toString() { - - return mode; - } - - public String getValue() { - - return mode; - } - - public static TPermissionMode getTPermissionMode(String type) { - - if (type.equals(NONE.getValue())) - return NONE; - if (type.equals(X.getValue())) - return X; - if (type.equals(W.getValue())) - return W; - if (type.equals(WX.getValue())) - return WX; - if (type.equals(R.getValue())) - return R; - if (type.equals(RX.getValue())) - return RX; - if (type.equals(RW.getValue())) - return RW; - if (type.equals(RWX.getValue())) - return RWX; - return null; - } - - public static TPermissionMode getTPermissionMode(int type) { - - switch (type) { - case 0: - return NONE; - case 1: - return X; - case 2: - return W; - case 3: - return WX; - case 4: - return R; - case 5: - return RX; - case 6: - return RW; - case 7: - return RWX; - default: - return NONE; - } - } - - public static TPermissionMode getTPermissionMode(FilesystemPermission type) { - - String perm = ""; - - if (type.canReadFile() || type.canListDirectory()) - perm += "R"; - if (type.canWriteFile()) - perm += "W"; - if 
(type.canTraverseDirectory()) - perm += "X"; - if (perm.length() == 0) - perm = "None"; - return getTPermissionMode(perm); - } - - /** - * This method is used to encode Permission mode from BE to FE commonucation. - * - * @param param - * Hashtable that will contains output xmlrpc structure. - * @param name - * The name of the field to be added. - */ - public void encode(Map param, String name) { - - Integer permissionInt = null; - if (this.equals(NONE)) - permissionInt = Integer.valueOf(0); - if (this.equals(X)) - permissionInt = Integer.valueOf(1); - if (this.equals(W)) - permissionInt = Integer.valueOf(2); - if (this.equals(WX)) - permissionInt = Integer.valueOf(3); - if (this.equals(R)) - permissionInt = Integer.valueOf(4); - if (this.equals(RX)) - permissionInt = Integer.valueOf(5); - if (this.equals(RW)) - permissionInt = Integer.valueOf(6); - if (this.equals(RWX)) - permissionInt = Integer.valueOf(7); - - param.put(name, permissionInt); - } + public static String PNAME_OTHERPERMISSION = "otherPermission"; + public static String PNAME_MODE = "mode"; + + private String mode = null; + + public static final TPermissionMode NONE = new TPermissionMode("None"); + public static final TPermissionMode X = new TPermissionMode("X"); + public static final TPermissionMode W = new TPermissionMode("W"); + public static final TPermissionMode WX = new TPermissionMode("WX"); + public static final TPermissionMode R = new TPermissionMode("R"); + public static final TPermissionMode RX = new TPermissionMode("RX"); + public static final TPermissionMode RW = new TPermissionMode("RW"); + public static final TPermissionMode RWX = new TPermissionMode("RWX"); + + private TPermissionMode(String mode) { + + this.mode = mode; + } + + public String toString() { + + return mode; + } + + public String getValue() { + + return mode; + } + + public static TPermissionMode getTPermissionMode(String type) { + + if (type.equals(NONE.getValue())) + return NONE; + if (type.equals(X.getValue())) + 
return X; + if (type.equals(W.getValue())) + return W; + if (type.equals(WX.getValue())) + return WX; + if (type.equals(R.getValue())) + return R; + if (type.equals(RX.getValue())) + return RX; + if (type.equals(RW.getValue())) + return RW; + if (type.equals(RWX.getValue())) + return RWX; + return null; + } + + public static TPermissionMode getTPermissionMode(int type) { + + switch (type) { + case 0: + return NONE; + case 1: + return X; + case 2: + return W; + case 3: + return WX; + case 4: + return R; + case 5: + return RX; + case 6: + return RW; + case 7: + return RWX; + default: + return NONE; + } + } + + public static TPermissionMode getTPermissionMode(FilesystemPermission type) { + + String perm = ""; + + if (type.canReadFile() || type.canListDirectory()) + perm += "R"; + if (type.canWriteFile()) + perm += "W"; + if (type.canTraverseDirectory()) + perm += "X"; + if (perm.length() == 0) + perm = "None"; + return getTPermissionMode(perm); + } + + /** + * This method is used to encode Permission mode from BE to FE commonucation. + * + * @param param Hashtable that will contains output xmlrpc structure. + * @param name The name of the field to be added. 
+ */ + public void encode(Map param, String name) { + + Integer permissionInt = null; + if (this.equals(NONE)) + permissionInt = Integer.valueOf(0); + if (this.equals(X)) + permissionInt = Integer.valueOf(1); + if (this.equals(W)) + permissionInt = Integer.valueOf(2); + if (this.equals(WX)) + permissionInt = Integer.valueOf(3); + if (this.equals(R)) + permissionInt = Integer.valueOf(4); + if (this.equals(RX)) + permissionInt = Integer.valueOf(5); + if (this.equals(RW)) + permissionInt = Integer.valueOf(6); + if (this.equals(RWX)) + permissionInt = Integer.valueOf(7); + + param.put(name, permissionInt); + } } diff --git a/src/main/java/it/grid/storm/srm/types/TRequestToken.java b/src/main/java/it/grid/storm/srm/types/TRequestToken.java index f88cfaa74..4e5c6b8f1 100644 --- a/src/main/java/it/grid/storm/srm/types/TRequestToken.java +++ b/src/main/java/it/grid/storm/srm/types/TRequestToken.java @@ -33,137 +33,133 @@ * @author Magnoni Luca * */ - public class TRequestToken implements Serializable { - private static final long serialVersionUID = -6926632390881024529L; + private static final long serialVersionUID = -6926632390881024529L; - public static final String PNAME_REQUESTOKEN = "requestToken"; + public static final String PNAME_REQUESTOKEN = "requestToken"; - private final String value; + private final String value; - private final Calendar expiration; + private final Calendar expiration; - private static final long REQUEST_LIFETIME = Configuration.getInstance() - .getExpiredRequestTime() * 1000; + private final long defaultExpirationTime = Configuration.getInstance().getCompletedRequestsAgentPurgeAge(); - public TRequestToken(String requestToken, Date timestamp) - throws InvalidTRequestTokenAttributesException { + public TRequestToken(String requestToken, Date timestamp) + throws InvalidTRequestTokenAttributesException { - if (requestToken == null || requestToken.trim().isEmpty()) { - throw new InvalidTRequestTokenAttributesException(requestToken); - } - 
this.value = requestToken; - Calendar expiration = null; - if (timestamp != null) { - expiration = Calendar.getInstance(); - expiration.setTimeInMillis(timestamp.getTime() + REQUEST_LIFETIME); - } - this.expiration = expiration; - } - - public TRequestToken() throws InvalidTRequestTokenAttributesException { - this(UUID.randomUUID().toString(), Calendar.getInstance().getTime()); - } - - public static TRequestToken getRandom() { - - UUID token = UUID.randomUUID(); - try { - return new TRequestToken(token.toString(), Calendar.getInstance() - .getTime()); - } catch (InvalidTRequestTokenAttributesException e) { - // never thrown - throw new IllegalStateException( - "Unexpected InvalidTRequestTokenAttributesException", e); - } - } - - @JsonIgnore - public boolean hasExpirationDate() { - - return expiration != null; - } - - @JsonIgnore - public boolean isExpired() throws IllegalStateException { - - if (!hasExpirationDate()) { - throw new IllegalStateException( - "Unable to check expiration, the token han not an expiration date"); - } - return expiration.before(Calendar.getInstance()); - } - - /** - * @return the expiration - */ - public Calendar getExpiration() { - - return expiration; - } - - public void updateExpiration(Date expiration) { - - this.expiration.setTime(expiration); - } - - public String getValue() { - - return value; - } - - public String toString() { - - return value; - } - - public static TRequestToken decode(Map inputParam, - String fieldName) throws InvalidTRequestTokenAttributesException { - - return new TRequestToken((String) inputParam.get(fieldName), null); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = 1; - result = prime * result + ((value == null) ? 
0 : value.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TRequestToken other = (TRequestToken) obj; - if (value == null) { - if (other.value != null) { - return false; - } - } else if (!value.equals(other.value)) { - return false; - } - return true; - } + if (requestToken == null || requestToken.trim().isEmpty()) { + throw new InvalidTRequestTokenAttributesException(requestToken); + } + this.value = requestToken; + Calendar expiration = null; + if (timestamp != null) { + expiration = Calendar.getInstance(); + expiration.setTimeInMillis(timestamp.getTime() + defaultExpirationTime * 1000); + } + this.expiration = expiration; + } + + public TRequestToken() throws InvalidTRequestTokenAttributesException { + this(UUID.randomUUID().toString(), Calendar.getInstance().getTime()); + } + + public static TRequestToken getRandom() { + + UUID token = UUID.randomUUID(); + try { + return new TRequestToken(token.toString(), Calendar.getInstance().getTime()); + } catch (InvalidTRequestTokenAttributesException e) { + // never thrown + throw new IllegalStateException("Unexpected InvalidTRequestTokenAttributesException", e); + } + } + + @JsonIgnore + public boolean hasExpirationDate() { + + return expiration != null; + } + + @JsonIgnore + public boolean isExpired() throws IllegalStateException { + + if (!hasExpirationDate()) { + throw new IllegalStateException( + "Unable to check expiration, the token han not an expiration date"); + } + return expiration.before(Calendar.getInstance()); + } + + /** + * @return the expiration + */ + public Calendar getExpiration() { + + return expiration; + } + + public void updateExpiration(Date expiration) { + + this.expiration.setTime(expiration); + } + + public String getValue() { + + 
return value; + } + + public String toString() { + + return value; + } + + public static TRequestToken decode(Map inputParam, String fieldName) + throws InvalidTRequestTokenAttributesException { + + return new TRequestToken((String) inputParam.get(fieldName), null); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = 1; + result = prime * result + ((value == null) ? 0 : value.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + TRequestToken other = (TRequestToken) obj; + if (value == null) { + if (other.value != null) { + return false; + } + } else if (!value.equals(other.value)) { + return false; + } + return true; + } } diff --git a/src/main/java/it/grid/storm/srm/types/TRequestType.java b/src/main/java/it/grid/storm/srm/types/TRequestType.java index d1e998cf6..7666acc96 100644 --- a/src/main/java/it/grid/storm/srm/types/TRequestType.java +++ b/src/main/java/it/grid/storm/srm/types/TRequestType.java @@ -18,8 +18,8 @@ package it.grid.storm.srm.types; /** - * This class represents the ReqType of an SRM request. It is a simple - * application of the TypeSafe Enum Pattern. + * This class represents the ReqType of an SRM request. It is a simple application of the TypeSafe + * Enum Pattern. 
* * @author EGRID ICTP Trieste / CNAF Bologna * @date March 18th, 2005 @@ -27,44 +27,28 @@ */ public enum TRequestType { - PREPARE_TO_GET("PrepareToGet"), PREPARE_TO_PUT("PrepareToPut"), COPY("Copy"), BRING_ON_LINE( - "BringOnLine"), EMPTY("Empty"), UNKNOWN("Unknown"); + PREPARE_TO_GET("PrepareToGet"), PREPARE_TO_PUT("PrepareToPut"), COPY("Copy"), BRING_ON_LINE( + "BringOnLine"), EMPTY("Empty"); - private final String value; + private final String value; - private TRequestType(String value) { + private TRequestType(String value) { - this.value = value; - } + this.value = value; + } - public String getValue() { + public String getValue() { - return value; - } + return value; + } - /** - * Facility method that returns a TRequestType object given its String - * representation. If no TRequestType is found for the given String, an - * IllegalArgumentException is thrown. - */ - public static TRequestType getTRequestType(String type) - throws IllegalArgumentException { + public boolean isEmpty() { - for (TRequestType requestType : TRequestType.values()) { - if (requestType.getValue().equals(type)) { - return requestType; - } - } - return UNKNOWN; - } + return this.equals(EMPTY); + } - public boolean isEmpty() { + public String toString() { - return this.equals(EMPTY); - } - - public String toString() { - - return value; - } + return value; + } } diff --git a/src/main/java/it/grid/storm/srm/types/TRetentionPolicy.java b/src/main/java/it/grid/storm/srm/types/TRetentionPolicy.java index 87226c6c9..db2b7f33f 100644 --- a/src/main/java/it/grid/storm/srm/types/TRetentionPolicy.java +++ b/src/main/java/it/grid/storm/srm/types/TRetentionPolicy.java @@ -29,84 +29,80 @@ public class TRetentionPolicy { - public static String PNAME_retentionPolicy = "retentionPolicy"; - private String retentionPolicy = null; - - public static final TRetentionPolicy REPLICA = new TRetentionPolicy("REPLICA"), - OUTPUT = new TRetentionPolicy("OUTPUT"), CUSTODIAL = new TRetentionPolicy( - "CUSTODIAL"), 
EMPTY = new TRetentionPolicy("EMPTY"); - - private TRetentionPolicy(String retPol) { - - this.retentionPolicy = retPol; - } - - public final static TRetentionPolicy getTRetentionPolicy(int idx) { - - switch (idx) { - case 0: - return REPLICA; - case 1: - return OUTPUT; - case 2: - return CUSTODIAL; - default: - return EMPTY; - } - - } - - /** - * decode() method creates a TRetentionPolicy object from the inforation - * contained into the structured parameter received from the FE. - * - * @param inputParam - * hashtable structure - * @param fieldName - * field name - * @return - */ - public final static TRetentionPolicy decode(Map inputParam, String fieldName) { - - Integer val; - - val = (Integer) inputParam.get(fieldName); - if (val == null) - return EMPTY; - - return TRetentionPolicy.getTRetentionPolicy(val.intValue()); - } - - /** - * encode() method creates structured parameter representing this ogbject. It - * is passed to the FE. - * - * @param outputParam - * hashtable structure - * @param fieldName - * field name - */ - public void encode(Map outputParam, String fieldName) { - - Integer value = null; - - if (this.equals(TRetentionPolicy.REPLICA)) - value = Integer.valueOf(0); - if (this.equals(TRetentionPolicy.OUTPUT)) - value = Integer.valueOf(1); - if (this.equals(TRetentionPolicy.CUSTODIAL)) - value = Integer.valueOf(2); - - outputParam.put(fieldName, value); - } - - public String toString() { - - return retentionPolicy; - } - - public String getValue() { - - return retentionPolicy; - } + public static String PNAME_retentionPolicy = "retentionPolicy"; + private String retentionPolicy = null; + + public static final TRetentionPolicy REPLICA = new TRetentionPolicy("REPLICA"); + public static final TRetentionPolicy OUTPUT = new TRetentionPolicy("OUTPUT"); + public static final TRetentionPolicy CUSTODIAL = new TRetentionPolicy("CUSTODIAL"); + public static final TRetentionPolicy EMPTY = new TRetentionPolicy("EMPTY"); + + private TRetentionPolicy(String 
retPol) { + + this.retentionPolicy = retPol; + } + + public final static TRetentionPolicy getTRetentionPolicy(int idx) { + + switch (idx) { + case 0: + return REPLICA; + case 1: + return OUTPUT; + case 2: + return CUSTODIAL; + default: + return EMPTY; + } + + } + + /** + * decode() method creates a TRetentionPolicy object from the information contained into the + * structured parameter received from the FE. + * + * @param inputParam map structure + * @param fieldName field name + * @return + */ + public final static TRetentionPolicy decode(Map inputParam, String fieldName) { + + Integer val; + + val = (Integer) inputParam.get(fieldName); + if (val == null) + return EMPTY; + + return TRetentionPolicy.getTRetentionPolicy(val.intValue()); + } + + /** + * encode() method creates structured parameter representing this object. It is passed to the FE. + * + * @param outputParam map structure + * @param fieldName field name + */ + public void encode(Map outputParam, String fieldName) { + + Integer value = null; + + if (this.equals(TRetentionPolicy.REPLICA)) + value = Integer.valueOf(0); + if (this.equals(TRetentionPolicy.OUTPUT)) + value = Integer.valueOf(1); + if (this.equals(TRetentionPolicy.CUSTODIAL)) + value = Integer.valueOf(2); + + outputParam.put(fieldName, value); + } + + public String toString() { + + return retentionPolicy; + } + + public String getValue() { + + return retentionPolicy; + } } diff --git a/src/main/java/it/grid/storm/srm/types/TRetentionPolicyInfo.java b/src/main/java/it/grid/storm/srm/types/TRetentionPolicyInfo.java index 819b3fe04..cf2d4991e 100644 --- a/src/main/java/it/grid/storm/srm/types/TRetentionPolicyInfo.java +++ b/src/main/java/it/grid/storm/srm/types/TRetentionPolicyInfo.java @@ -28,126 +28,118 @@ package it.grid.storm.srm.types; import java.io.Serializable; -import java.util.HashMap; import java.util.Map; +import com.google.common.collect.Maps; + public class TRetentionPolicyInfo implements Serializable { - private static final long 
serialVersionUID = -8530924298311412411L; - - /* Hashtable field names for encode() and decode() methods */ - public static String PNAME_retentionPolicyInfo = "retentionPolicyInfo"; - - public static final TRetentionPolicyInfo TAPE0_DISK1_RETENTION_POLICY = new TRetentionPolicyInfo( - TRetentionPolicy.REPLICA, TAccessLatency.ONLINE); - public static final TRetentionPolicyInfo TAPE1_DISK1_RETENTION_POLICY = new TRetentionPolicyInfo( - TRetentionPolicy.CUSTODIAL, TAccessLatency.ONLINE); - private TRetentionPolicy retentionPolicy; - private TAccessLatency accessLatency; - - public TRetentionPolicyInfo() { - - } - - public TRetentionPolicyInfo(TRetentionPolicy retentionPolicy, - TAccessLatency accessLatency) { - - this.retentionPolicy = retentionPolicy; - this.accessLatency = accessLatency; - } - - /** - * decode() method creates a TRetentionPolicyInfo object from the inforation - * contained into the structured parameter received from the FE. - * - * @param inputParam - * hashtable structure - * @param fieldName - * field name - * @return - */ - public static TRetentionPolicyInfo decode(Map inputParam, String fieldName) { - - Map param = (Map) inputParam.get(fieldName); - if (param == null) - return null; - TRetentionPolicy retPol = TRetentionPolicy.decode(param, - TRetentionPolicy.PNAME_retentionPolicy); - TAccessLatency accLat = TAccessLatency.decode(param, - TAccessLatency.PNAME_accessLatency); - - return new TRetentionPolicyInfo(retPol, accLat); - } - - /** - * encode() method creates structured parameter representing this ogbject. It - * is passed to the FE. - * - * @param outputParam - * hashtable structure - * @param fieldName - * field name - */ - public void encode(Map outputParam, String fieldName) { - - Map param = new HashMap(); - - retentionPolicy.encode(param, TRetentionPolicy.PNAME_retentionPolicy); - accessLatency.encode(param, TAccessLatency.PNAME_accessLatency); - - outputParam.put(fieldName, param); - } - - /** - * Get Retention Policy. 
- * - * @return TRetentionPolicy - */ - public TRetentionPolicy getRetentionPolicy() { - - return retentionPolicy; - } - - /** - * Set Retention Policy. - * - * @param retentionPolicy - * TRetentionPolicy - */ - public void setRetentionPolicy(TRetentionPolicy retentionPolicy) { - - this.retentionPolicy = retentionPolicy; - } - - /** - * Get AccessLatency. - * - * @return TAccessLatency - */ - public TAccessLatency getAccessLatency() { - - return accessLatency; - } - - /** - * Set AccessLatency. - * - * @param accessLatency - * TAccessLatency - */ - public void setAccessLatency(TAccessLatency accessLatency) { - - this.accessLatency = accessLatency; - } - - public String toString() { - - StringBuilder buf = new StringBuilder("RetentionPolicyInfo: "); - buf.append("["); - buf.append("retentionPolicy: " + retentionPolicy); - buf.append("] , ["); - buf.append("accessLatency: " + accessLatency); - buf.append("]"); - return buf.toString(); - } + private static final long serialVersionUID = -8530924298311412411L; + + /* Hashtable field names for encode() and decode() methods */ + public static String PNAME_retentionPolicyInfo = "retentionPolicyInfo"; + + public static final TRetentionPolicyInfo TAPE0_DISK1_RETENTION_POLICY = + new TRetentionPolicyInfo(TRetentionPolicy.REPLICA, TAccessLatency.ONLINE); + public static final TRetentionPolicyInfo TAPE1_DISK1_RETENTION_POLICY = + new TRetentionPolicyInfo(TRetentionPolicy.CUSTODIAL, TAccessLatency.ONLINE); + private TRetentionPolicy retentionPolicy; + private TAccessLatency accessLatency; + + public TRetentionPolicyInfo() { + + } + + public TRetentionPolicyInfo(TRetentionPolicy retentionPolicy, TAccessLatency accessLatency) { + + this.retentionPolicy = retentionPolicy; + this.accessLatency = accessLatency; + } + + /** + * decode() method creates a TRetentionPolicyInfo object from the information contained into the + * structured parameter received from the FE. 
+ * + * @param inputParam map structure + * @param fieldName field name + * @return + */ + public static TRetentionPolicyInfo decode(Map inputParam, String fieldName) { + + Map param = (Map) inputParam.get(fieldName); + if (param == null) + return null; + TRetentionPolicy retPol = + TRetentionPolicy.decode(param, TRetentionPolicy.PNAME_retentionPolicy); + TAccessLatency accLat = TAccessLatency.decode(param, TAccessLatency.PNAME_accessLatency); + + return new TRetentionPolicyInfo(retPol, accLat); + } + + /** + * encode() method creates structured parameter representing this object. It is passed to the FE. + * + * @param outputParam hashmap structure + * @param fieldName field name + */ + public void encode(Map outputParam, String fieldName) { + + Map param = Maps.newHashMap(); + + retentionPolicy.encode(param, TRetentionPolicy.PNAME_retentionPolicy); + accessLatency.encode(param, TAccessLatency.PNAME_accessLatency); + + outputParam.put(fieldName, param); + } + + /** + * Get Retention Policy. + * + * @return TRetentionPolicy + */ + public TRetentionPolicy getRetentionPolicy() { + + return retentionPolicy; + } + + /** + * Set Retention Policy. + * + * @param retentionPolicy TRetentionPolicy + */ + public void setRetentionPolicy(TRetentionPolicy retentionPolicy) { + + this.retentionPolicy = retentionPolicy; + } + + /** + * Get AccessLatency. + * + * @return TAccessLatency + */ + public TAccessLatency getAccessLatency() { + + return accessLatency; + } + + /** + * Set AccessLatency. 
+ * + * @param accessLatency TAccessLatency + */ + public void setAccessLatency(TAccessLatency accessLatency) { + + this.accessLatency = accessLatency; + } + + public String toString() { + + StringBuilder buf = new StringBuilder("RetentionPolicyInfo: "); + buf.append("["); + buf.append("retentionPolicy: " + retentionPolicy); + buf.append("] , ["); + buf.append("accessLatency: " + accessLatency); + buf.append("]"); + return buf.toString(); + } } diff --git a/src/main/java/it/grid/storm/srm/types/TSURL.java b/src/main/java/it/grid/storm/srm/types/TSURL.java index 1e6c8236a..821effb2d 100644 --- a/src/main/java/it/grid/storm/srm/types/TSURL.java +++ b/src/main/java/it/grid/storm/srm/types/TSURL.java @@ -17,6 +17,12 @@ package it.grid.storm.srm.types; +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.common.types.EndPoint; import it.grid.storm.common.types.InvalidEndPointAttributeException; import it.grid.storm.common.types.InvalidMachineAttributeException; @@ -30,122 +36,67 @@ import it.grid.storm.common.types.StFN; import it.grid.storm.config.Configuration; import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.model.Authority; import it.grid.storm.namespace.naming.SURL; -import java.util.ArrayList; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * This class represents a TSURL, that is a Site URL. It is made up of a - * SiteProtocol and a SFN. - * - * @author Ezio Corso - Magnoni Luca - * @author EGRID ICTP Trieste / CNAF INFN Bologna - * @date Avril, 2005 - * @version 2.0 + * This class represents a TSURL, that is a Site URL. It is made up of a SiteProtocol and a SFN. 
*/ public class TSURL { - private static Logger log = LoggerFactory.getLogger(TSURL.class); - - private static final String EMPTY_STRING = ""; - - /** - * The surl as provided by User - */ - private final String rawSurl; - private final SiteProtocol sp; - private final SFN sfn; - private String normalizedStFN = null; - private int uniqueID = 0; - - private boolean empty = true; - - public static final String PNAME_SURL = "surl"; - public static final String PNAME_FROMSURL = "fromSURL"; - public static final String PNAME_TOSURL = "toSURL"; - - private static ArrayList tsurlManaged = new ArrayList(); - private static LinkedList defaultPorts = new LinkedList(); + private static Logger log = LoggerFactory.getLogger(TSURL.class); - static { + private static final String EMPTY_STRING = ""; - // Lazy initialization from Configuration - if (tsurlManaged.isEmpty()) { + /** + * The surl as provided by User + */ + private final String rawSurl; + private final SiteProtocol sp; + private final SFN sfn; + private String normalizedStFN = null; + private int uniqueID = 0; - TSURL checkTSURL; - String[] surlValid = Configuration.getInstance().getManagedSURLs(); - for (String checkSurl : surlValid) { - try { + private boolean empty = true; - checkTSURL = TSURL.makeFromStringWellFormed(checkSurl); - tsurlManaged.add(checkTSURL); - log.debug("### SURL Managed : {}",checkTSURL); + public static final String PNAME_SURL = "surl"; + public static final String PNAME_FROMSURL = "fromSURL"; + public static final String PNAME_TOSURL = "toSURL"; - } catch (InvalidTSURLAttributesException e) { + private static List managedSrmEndpoints = Configuration.getInstance().getManagedSrmEndpoints(); - log.error("Unable to build a TSURL : {}", checkSurl, e); - } - } - } + private TSURL(SiteProtocol sp, SFN sfn, String rawSurl, boolean empty) { - if (defaultPorts.isEmpty()) { + this.sp = sp; + this.sfn = sfn; + this.rawSurl = rawSurl; + this.empty = empty; - Integer[] ports = Configuration.getInstance() - 
.getManagedSurlDefaultPorts(); - - for (Integer portInteger : ports) { - try { - - defaultPorts.add(Port.make(portInteger.intValue())); - log.debug("### Default SURL port : {}", defaultPorts.getLast()); - - } catch (InvalidPortAttributeException e) { - - log.error("Unable to build a Port : {}", portInteger , e); - - } - } - } - } - - private TSURL(SiteProtocol sp, SFN sfn, String rawSurl, boolean empty) { - - this.sp = sp; - this.sfn = sfn; - this.rawSurl = rawSurl; - this.empty = empty; - - } + } - /** - * Method that create a TSURL from structure received from FE. - * - * @throws InvalidTSURLAttributesException - */ - public static TSURL decode(Map inputParam, String name) - throws InvalidTSURLAttributesException { + /** + * Method that create a TSURL from structure received from FE. + * + * @throws InvalidTSURLAttributesException + */ + public static TSURL decode(Map inputParam, String name) + throws InvalidTSURLAttributesException { - String surlstring = (String) inputParam.get(name); - return TSURL.makeFromStringWellFormed(surlstring); - } + String surlstring = (String) inputParam.get(name); + return TSURL.makeFromStringWellFormed(surlstring); + } - /** - * Build a TSURL by extracting the content of the received SURL object and - * storing the received raw surl string - * - * @param surl - * @param rawSurl - * @return - * @throws InvalidTSURLAttributesException - */ + /** + * Build a TSURL by extracting the content of the received SURL object and storing the received + * raw surl string + * + * @param surl + * @param rawSurl + * @return + * @throws InvalidTSURLAttributesException + */ public static TSURL getWellFormed(SURL surl, String rawSurl) - throws InvalidTSURLAttributesException { + throws InvalidTSURLAttributesException { TSURL result; SFN sfn; @@ -230,348 +181,305 @@ public static TSURL getWellFormed(SURL surl, String rawSurl) } - /** - * Static factory method that returns a TSURL and that requires the - * SiteProtocol and the SFN of this TSURL: if any 
is null or empty an - * InvalidTSURLAttributesException is thrown. Check for ".." in Storage File - * Name for security issues. - */ - private static TSURL make(SiteProtocol sp, SFN sfn, String userSurl) - throws InvalidTSURLAttributesException { - - if ((sp == null) || (sfn == null) || (sp == SiteProtocol.EMPTY) - || sfn.isEmpty()) { - throw new InvalidTSURLAttributesException(sp, sfn); - } - return new TSURL(sp, sfn, userSurl, false); - } - - /** - * Static factory method that returns an empty TSURL. - */ - public static TSURL makeEmpty() { - - return new TSURL(SiteProtocol.EMPTY, SFN.makeEmpty(), "", true); - } - - /** - * Static factory method that returns a TSURL from a String representation: if - * it is null or malformed then an Invalid TSURLAttributesException is thrown. - */ - public static TSURL makeFromStringWellFormed(String surlString) - throws InvalidTSURLAttributesException { - - TSURL result = null; - if (surlString == null) { - throw new InvalidTSURLAttributesException(null, null); - } - // first occurrences of :// - int separator = surlString.indexOf("://"); - if ((separator == -1) || (separator == 0)) { - // separator not found or right at the beginning! - throw new InvalidTSURLAttributesException(null, null); - } - String spString = surlString.substring(0, separator); - SiteProtocol sp = null; - try { - sp = SiteProtocol.fromString(spString); - } catch (IllegalArgumentException e) { - // do nothing - sp remains null and that is fine! - log.warn("TSURL: Site protocol by {} is empty, but that's fine.", - spString); - } - if ((separator + 3) > (surlString.length())) { - // separator found at the end! 
- throw new InvalidTSURLAttributesException(sp, null); - } - - log.debug("SURL string: {}", surlString); - SURL surl; - - try { - surl = SURL.makeSURLfromString(surlString); - } catch (NamespaceException ex) { - log.error("Invalid surl: {}", surlString, ex); - throw new InvalidTSURLAttributesException(null, null); - } - - result = getWellFormed(surl, surlString); - - return result; - } - - /** - * Static factory method that returns a TSURL from a String representation: if - * it is null or malformed then an Invalid TSURLAttributesException is thrown. - */ - public static TSURL makeFromStringValidate(String surlString) - throws InvalidTSURLAttributesException { - - TSURL tsurl = makeFromStringWellFormed(surlString); - - if (!isValid(tsurl)) { - - log.warn("SURL {} is not managed by this StoRM instance.", tsurl); - throw new InvalidTSURLAttributesException(tsurl.sp, tsurl.sfn()); - - } - return tsurl; - } - - /** - * Auxiliary method that returns true if the supplied TSURL corresponds to - * some managed SURL as declared in Configuration. 
- * - */ - public static boolean isValid(TSURL surl) { - return isManaged(surl, TSURL.tsurlManaged); - } - - public static boolean isManaged(TSURL surl, List managedSurls) { - - boolean result = false; - for (TSURL tsurlReference : managedSurls) { - if (isSURLManaged(surl, tsurlReference)) { - result = true; - break; - } - } - return result; - } - - /** - * A SURL is managed by a managed SURL if their hosts are the same and if the - * comingSURL specifies a port this port is the same as the one specified on - * the managed SURL or, if the managed SURL doesn't specifies a port this port - * is listed in the default ports - * - * @param comingSURL - * @param managedSURL - * @return - */ - private static boolean isSURLManaged(TSURL comingSURL, TSURL managedSURL) { - - boolean result = false; - String serviceHost = comingSURL.sfn().machine().toString(); - String expectedServiceHost = managedSURL.sfn().machine().toString(); - - log.debug("SURL VALID [ coming-service-host = {}, expected = {} ]", - serviceHost, expectedServiceHost); - - if (comingSURL.sfn().port().isEmpty()) { - - if (serviceHost.equalsIgnoreCase(expectedServiceHost)) { - result = true; - } - } else { - - if (!managedSURL.sfn().port().isEmpty()) { - - int expectedServicePort = managedSURL.sfn().port().toInt(); - int port = comingSURL.sfn().port().toInt(); - - log.debug("SURL VALID [ coming-service-port = {}, expected = {} ]", - port, expectedServicePort); - - if ((serviceHost.equalsIgnoreCase(expectedServiceHost)) - && (expectedServicePort == port)) { - result = true; - } - } else { - int port = comingSURL.sfn().port().toInt(); - try { - Port comingPort = Port.make(port); - if ((serviceHost.equalsIgnoreCase(expectedServiceHost)) - && (defaultPorts.contains(comingPort))) { - result = true; - } - } catch (InvalidPortAttributeException e) { - log.error("Invalid surl: {}", comingSURL, e); - } - } - } - return result; - } - - public void encode(Map param, String name) { - - param.put(name, toString()); - } - - 
@Override - public int hashCode() { - - final int prime = 31; - int result = 1; - result = prime * result + (empty ? 1231 : 1237); - result = prime * result - + ((normalizedStFN() == null) ? 0 : normalizedStFN().hashCode()); - result = prime * result + ((rawSurl == null) ? 0 : rawSurl.hashCode()); - result = prime * result + ((sfn() == null) ? 0 : sfn().hashCode()); - result = prime * result - + ((protocol() == null) ? 0 : protocol().hashCode()); - result = prime * result + uniqueId(); - return result; - } - - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TSURL other = (TSURL) obj; - if (empty != other.empty) { - return false; - } - if (normalizedStFN() == null) { - if (other.normalizedStFN() != null) { - return false; - } - } else if (!normalizedStFN().equals(other.normalizedStFN())) { - return false; - } - if (rawSurl == null) { - if (other.rawSurl != null) { - return false; - } - } else if (!rawSurl.equals(other.rawSurl)) { - return false; - } - if (sfn() == null) { - if (other.sfn() != null) { - return false; - } - } else if (!sfn().equals(other.sfn())) { - return false; - } - if (protocol() == null) { - if (other.protocol() != null) { - return false; - } - } else if (!protocol().equals(other.protocol())) { - return false; - } - if (uniqueId() != other.uniqueId()) { - return false; - } - return true; - } - - /** - * Returns a string representation of the SURL. - * - * @return String - */ - public String getSURLString() { - - if (empty) { - return EMPTY_STRING; - } - - StringBuilder builder = new StringBuilder(); - builder.append(sp); - builder.append("://"); - builder.append(sfn); - - return builder.toString(); - } - - public boolean isEmpty() { - - return empty; - } - - /** - * Method that returns the SiteProtocol of this TSURL. If this is empty, then - * an empty SiteProtocol is returned. 
- */ - public SiteProtocol protocol() { - - if (empty) { - return SiteProtocol.EMPTY; - } - return sp; - } - - /** - * @return the rawSurl - */ - public String rawSurl() { - - return rawSurl; - } - - /** - * Method that returns the SFN of this SURL. If this is empty, then an empty - * SFN is returned. - */ - public SFN sfn() { - - if (empty) { - return SFN.makeEmpty(); - } - return sfn; - } - - /** - * @return - */ - public String normalizedStFN() { - - if (this.normalizedStFN == null) { - this.normalizedStFN = this.sfn.stfn().toString(); - } - return this.normalizedStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @param uniqueID - * the uniqueID to set - */ - public void setUniqueID(int uniqueID) { - - this.uniqueID = uniqueID; - } - - /** - * @return - */ - public int uniqueId() { - - if (this.uniqueID == 0) { - this.uniqueID = this.sfn.stfn().hashCode(); - } - return this.uniqueID; - } - - @Override - public String toString() { - - if (empty) { - return "Empty TSURL"; - } - - StringBuilder builder = new StringBuilder(); - builder.append(sp); - builder.append("://"); - builder.append(sfn); - - return builder.toString(); - } + /** + * Static factory method that returns a TSURL and that requires the SiteProtocol and the SFN of + * this TSURL: if any is null or empty an InvalidTSURLAttributesException is thrown. Check for + * ".." in Storage File Name for security issues. + */ + private static TSURL make(SiteProtocol sp, SFN sfn, String userSurl) + throws InvalidTSURLAttributesException { + + if ((sp == null) || (sfn == null) || (sp == SiteProtocol.EMPTY) || sfn.isEmpty()) { + throw new InvalidTSURLAttributesException(sp, sfn); + } + return new TSURL(sp, sfn, userSurl, false); + } + + /** + * Static factory method that returns an empty TSURL. 
+ */ + public static TSURL makeEmpty() { + + return new TSURL(SiteProtocol.EMPTY, SFN.makeEmpty(), "", true); + } + + /** + * Static factory method that returns a TSURL from a String representation: if it is null or + * malformed then an Invalid TSURLAttributesException is thrown. + */ + public static TSURL makeFromStringWellFormed(String surlString) + throws InvalidTSURLAttributesException { + + TSURL result = null; + if (surlString == null) { + throw new InvalidTSURLAttributesException(null, null); + } + // first occurrences of :// + int separator = surlString.indexOf("://"); + if ((separator == -1) || (separator == 0)) { + // separator not found or right at the beginning! + throw new InvalidTSURLAttributesException(null, null); + } + String spString = surlString.substring(0, separator); + SiteProtocol sp = null; + try { + sp = SiteProtocol.fromString(spString); + } catch (IllegalArgumentException e) { + // do nothing - sp remains null and that is fine! + log.warn("TSURL: Site protocol by {} is empty, but that's fine.", spString); + } + if ((separator + 3) > (surlString.length())) { + // separator found at the end! + throw new InvalidTSURLAttributesException(sp, null); + } + + log.debug("SURL string: {}", surlString); + SURL surl; + + try { + surl = SURL.makeSURLfromString(surlString); + } catch (NamespaceException ex) { + log.error("Invalid surl: {}", surlString, ex); + throw new InvalidTSURLAttributesException(null, null); + } + + result = getWellFormed(surl, surlString); + + return result; + } + + /** + * Static factory method that returns a TSURL from a String representation: if it is null or + * malformed then an Invalid TSURLAttributesException is thrown. 
+ */ + public static TSURL makeFromStringValidate(String surlString) + throws InvalidTSURLAttributesException { + + TSURL tsurl = makeFromStringWellFormed(surlString); + + if (!isValid(tsurl)) { + + log.warn("SURL {} is not managed by this StoRM instance.", tsurl); + throw new InvalidTSURLAttributesException(tsurl.sp, tsurl.sfn()); + + } + return tsurl; + } + + /** + * Auxiliary method that returns true if the supplied TSURL corresponds to some managed SURL as + * declared in Configuration. + * + */ + public static boolean isValid(TSURL surl) { + return isManaged(surl, TSURL.managedSrmEndpoints); + } + + public static boolean isManaged(TSURL surl, List managedEndpoints) { + + boolean result = false; + for (Authority e : managedEndpoints) { + if (isSURLManaged(surl, e)) { + result = true; + break; + } + } + return result; + } + + private static boolean isSURLManaged(TSURL comingSURL, Authority endpoint) { + + String serviceHost = comingSURL.sfn().machine().toString(); + + log.debug("SURL VALID [ coming-service-host = {}, expected = {} ]", serviceHost, endpoint.getServiceHostname()); + + if (serviceHost.equalsIgnoreCase(endpoint.getServiceHostname())) { + if (comingSURL.sfn().port().isEmpty()) { + return true; + } else { + int servicePort = comingSURL.sfn().port().toInt(); + log.debug("SURL VALID [ coming-service-port = {}, expected = {} ]", servicePort, endpoint.getServicePort()); + return servicePort == endpoint.getServicePort(); + } + } else { + return false; + } + } + + public void encode(Map param, String name) { + + param.put(name, toString()); + } + + @Override + public int hashCode() { + + final int prime = 31; + int result = 1; + result = prime * result + (empty ? 1231 : 1237); + result = prime * result + ((normalizedStFN() == null) ? 0 : normalizedStFN().hashCode()); + result = prime * result + ((rawSurl == null) ? 0 : rawSurl.hashCode()); + result = prime * result + ((sfn() == null) ? 
0 : sfn().hashCode()); + result = prime * result + ((protocol() == null) ? 0 : protocol().hashCode()); + result = prime * result + uniqueId(); + return result; + } + + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + TSURL other = (TSURL) obj; + if (empty != other.empty) { + return false; + } + if (normalizedStFN() == null) { + if (other.normalizedStFN() != null) { + return false; + } + } else if (!normalizedStFN().equals(other.normalizedStFN())) { + return false; + } + if (rawSurl == null) { + if (other.rawSurl != null) { + return false; + } + } else if (!rawSurl.equals(other.rawSurl)) { + return false; + } + if (sfn() == null) { + if (other.sfn() != null) { + return false; + } + } else if (!sfn().equals(other.sfn())) { + return false; + } + if (protocol() == null) { + if (other.protocol() != null) { + return false; + } + } else if (!protocol().equals(other.protocol())) { + return false; + } + if (uniqueId() != other.uniqueId()) { + return false; + } + return true; + } + + /** + * Returns a string representation of the SURL. + * + * @return String + */ + public String getSURLString() { + + if (empty) { + return EMPTY_STRING; + } + + StringBuilder builder = new StringBuilder(); + builder.append(sp); + builder.append("://"); + builder.append(sfn); + + return builder.toString(); + } + + public boolean isEmpty() { + + return empty; + } + + /** + * Method that returns the SiteProtocol of this TSURL. If this is empty, then an empty + * SiteProtocol is returned. + */ + public SiteProtocol protocol() { + + if (empty) { + return SiteProtocol.EMPTY; + } + return sp; + } + + /** + * @return the rawSurl + */ + public String rawSurl() { + + return rawSurl; + } + + /** + * Method that returns the SFN of this SURL. If this is empty, then an empty SFN is returned. 
+ */ + public SFN sfn() { + + if (empty) { + return SFN.makeEmpty(); + } + return sfn; + } + + /** + * @return + */ + public String normalizedStFN() { + + if (this.normalizedStFN == null) { + this.normalizedStFN = this.sfn.stfn().toString(); + } + return this.normalizedStFN; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @param uniqueID the uniqueID to set + */ + public void setUniqueID(int uniqueID) { + + this.uniqueID = uniqueID; + } + + /** + * @return + */ + public int uniqueId() { + + if (this.uniqueID == 0) { + this.uniqueID = this.sfn.stfn().hashCode(); + } + return this.uniqueID; + } + + @Override + public String toString() { + + if (empty) { + return "Empty TSURL"; + } + + StringBuilder builder = new StringBuilder(); + builder.append(sp); + builder.append("://"); + builder.append(sfn); + + return builder.toString(); + } } diff --git a/src/main/java/it/grid/storm/srm/types/TSURLLifetimeReturnStatus.java b/src/main/java/it/grid/storm/srm/types/TSURLLifetimeReturnStatus.java index dfd518aef..dc4b5a771 100644 --- a/src/main/java/it/grid/storm/srm/types/TSURLLifetimeReturnStatus.java +++ b/src/main/java/it/grid/storm/srm/types/TSURLLifetimeReturnStatus.java @@ -26,138 +26,133 @@ */ package it.grid.storm.srm.types; -import java.util.HashMap; import java.util.List; import java.util.Map; +import com.google.common.collect.Maps; + public class TSURLLifetimeReturnStatus { - private TSURL surl = null; - private TReturnStatus returnStatus = null; - private TLifeTimeInSeconds fileLifetime = null; - private TLifeTimeInSeconds pinLifetime = null; - - public TSURLLifetimeReturnStatus() { - - } - - public TSURLLifetimeReturnStatus(TSURL surl, TReturnStatus status, - TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime) - throws InvalidTSURLLifetimeReturnStatusAttributeException { - - boolean ok = (surl != null); - if (!ok) - 
throw new InvalidTSURLLifetimeReturnStatusAttributeException(surl); - this.surl = surl; - this.returnStatus = status; - this.fileLifetime = fileLifetime; - this.pinLifetime = pinLifetime; - } - - /** - * Returns the SURL. - * - * @return TSURL - */ - public TSURL getSurl() { - - return surl; - } - - /** - * Sets the SURL - * - * @param surl - * TSURL - */ - public void setSurl(TSURL surl) { - - this.surl = surl; - } - - /** - * Set the status. - * - * @param status - * TReturnStatus - */ - public void setStatus(TReturnStatus status) { - - this.returnStatus = status; - } - - /** - * Get the status. - * - * @return TReturnStatus - */ - public TReturnStatus getStatus() { - - return this.returnStatus; - } - - /** - * Get fileLifetime. - * - * @return TLifeTimeInSeconds - */ - public TLifeTimeInSeconds getFileLifetime() { - - return this.fileLifetime; - } - - /** - * Set fileLifetime. - * - * @param fileLifetime - */ - public void setFileLifetime(TLifeTimeInSeconds fileLifetime) { - - this.fileLifetime = fileLifetime; - } - - /** - * Get pinLifetime. - * - * @return TLifeTimeInSeconds - */ - public TLifeTimeInSeconds getpinLifetime() { - - return this.pinLifetime; - } - - /** - * Set pinLifetime. - * - * @param fileLifetime - */ - public void setpinLifetime(TLifeTimeInSeconds pinLifetime) { - - this.pinLifetime = pinLifetime; - } - - /** - * Add an element to 'outputVector'. The element is a Hashtable structure of - * this instance of TSURLLifetimeReturnStatus (used to comunicate with the - * FE). 
- * - * @param outputVector - * Vector - */ - public void encode(List outputVector) { - - Map surlRetStatusParam = new HashMap(); - if (this.surl != null) - this.surl.encode(surlRetStatusParam, TSURL.PNAME_SURL); - if (this.returnStatus != null) - this.returnStatus.encode(surlRetStatusParam, TReturnStatus.PNAME_STATUS); - if (this.fileLifetime != null) - this.fileLifetime.encode(surlRetStatusParam, - TLifeTimeInSeconds.PNAME_FILELIFETIME); - if (this.pinLifetime != null) - this.pinLifetime.encode(surlRetStatusParam, - TLifeTimeInSeconds.PNAME_PINLIFETIME); - - outputVector.add(surlRetStatusParam); - } + private TSURL surl = null; + private TReturnStatus returnStatus = null; + private TLifeTimeInSeconds fileLifetime = null; + private TLifeTimeInSeconds pinLifetime = null; + + public TSURLLifetimeReturnStatus() { + + } + + public TSURLLifetimeReturnStatus(TSURL surl, TReturnStatus status, + TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime) + throws InvalidTSURLLifetimeReturnStatusAttributeException { + + boolean ok = (surl != null); + if (!ok) + throw new InvalidTSURLLifetimeReturnStatusAttributeException(surl); + this.surl = surl; + this.returnStatus = status; + this.fileLifetime = fileLifetime; + this.pinLifetime = pinLifetime; + } + + /** + * Returns the SURL. + * + * @return TSURL + */ + public TSURL getSurl() { + + return surl; + } + + /** + * Sets the SURL + * + * @param surl TSURL + */ + public void setSurl(TSURL surl) { + + this.surl = surl; + } + + /** + * Set the status. + * + * @param status TReturnStatus + */ + public void setStatus(TReturnStatus status) { + + this.returnStatus = status; + } + + /** + * Get the status. + * + * @return TReturnStatus + */ + public TReturnStatus getStatus() { + + return this.returnStatus; + } + + /** + * Get fileLifetime. + * + * @return TLifeTimeInSeconds + */ + public TLifeTimeInSeconds getFileLifetime() { + + return this.fileLifetime; + } + + /** + * Set fileLifetime. 
+ * + * @param fileLifetime + */ + public void setFileLifetime(TLifeTimeInSeconds fileLifetime) { + + this.fileLifetime = fileLifetime; + } + + /** + * Get pinLifetime. + * + * @return TLifeTimeInSeconds + */ + public TLifeTimeInSeconds getpinLifetime() { + + return this.pinLifetime; + } + + /** + * Set pinLifetime. + * + * @param fileLifetime + */ + public void setpinLifetime(TLifeTimeInSeconds pinLifetime) { + + this.pinLifetime = pinLifetime; + } + + /** + * Add an element to 'outputVector'. The element is a HashMap structure of this instance of + * TSURLLifetimeReturnStatus (used to communicate with the FE). + * + * @param outputVector Vector + */ + public void encode(List> outputVector) { + + Map surlRetStatusParam = Maps.newHashMap(); + if (this.surl != null) + this.surl.encode(surlRetStatusParam, TSURL.PNAME_SURL); + if (this.returnStatus != null) + this.returnStatus.encode(surlRetStatusParam, TReturnStatus.PNAME_STATUS); + if (this.fileLifetime != null) + this.fileLifetime.encode(surlRetStatusParam, TLifeTimeInSeconds.PNAME_FILELIFETIME); + if (this.pinLifetime != null) + this.pinLifetime.encode(surlRetStatusParam, TLifeTimeInSeconds.PNAME_PINLIFETIME); + + outputVector.add(surlRetStatusParam); + } } diff --git a/src/main/java/it/grid/storm/srm/types/TSURLReturnStatus.java b/src/main/java/it/grid/storm/srm/types/TSURLReturnStatus.java index df5ca2631..019604915 100644 --- a/src/main/java/it/grid/storm/srm/types/TSURLReturnStatus.java +++ b/src/main/java/it/grid/storm/srm/types/TSURLReturnStatus.java @@ -26,140 +26,140 @@ */ package it.grid.storm.srm.types; -import java.util.HashMap; import java.util.List; import java.util.Map; +import com.google.common.collect.Maps; + public class TSURLReturnStatus { - private TSURL surl = null; - private TReturnStatus returnStatus = null; - - public TSURLReturnStatus() { - - } - - public TSURLReturnStatus(TSURL surl, TReturnStatus status) { - - if (surl == null) - throw new IllegalArgumentException("SURL is null"); - 
this.surl = surl; - this.returnStatus = status; - } - - /** - * Method that return SURL specified in SRM request. - */ - - public TSURL getSurl() { - - return surl; - } - - public void setSurl(TSURL surl) { - - this.surl = surl; - } - - /** - * Set Status - */ - public void setStatus(TReturnStatus status) { - - this.returnStatus = status; - } - - /** - * Get Status - */ - public TReturnStatus getStatus() { - - return this.returnStatus; - } - - /* - * Encode function used to fill output structure for FE communication. - */ - public void encode(List outputVector) { - - // Creation of a single TMetaPathDetail struct - Map surlRetStatusParam = new HashMap(); - // Member name "surl" - if (this.surl != null) - this.surl.encode(surlRetStatusParam, TSURL.PNAME_SURL); - if (this.returnStatus != null) - this.returnStatus.encode(surlRetStatusParam, TReturnStatus.PNAME_STATUS); - - outputVector.add(surlRetStatusParam); - - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("TSURLReturnStatus [surl="); - builder.append(surl); - builder.append(", returnStatus="); - builder.append(returnStatus); - builder.append("]"); - return builder.toString(); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = 1; - result = prime * result - + ((returnStatus == null) ? 0 : returnStatus.hashCode()); - result = prime * result + ((surl == null) ? 
0 : surl.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TSURLReturnStatus other = (TSURLReturnStatus) obj; - if (returnStatus == null) { - if (other.returnStatus != null) { - return false; - } - } else if (!returnStatus.equals(other.returnStatus)) { - return false; - } - if (surl == null) { - if (other.surl != null) { - return false; - } - } else if (!surl.equals(other.surl)) { - return false; - } - return true; - } + private TSURL surl = null; + private TReturnStatus returnStatus = null; + + public TSURLReturnStatus() { + + } + + public TSURLReturnStatus(TSURL surl, TReturnStatus status) { + + if (surl == null) + throw new IllegalArgumentException("SURL is null"); + this.surl = surl; + this.returnStatus = status; + } + + /** + * Method that return SURL specified in SRM request. + */ + + public TSURL getSurl() { + + return surl; + } + + public void setSurl(TSURL surl) { + + this.surl = surl; + } + + /** + * Set Status + */ + public void setStatus(TReturnStatus status) { + + this.returnStatus = status; + } + + /** + * Get Status + */ + public TReturnStatus getStatus() { + + return this.returnStatus; + } + + /* + * Encode function used to fill output structure for FE communication. 
+ */ + public void encode(List> outputVector) { + + // Creation of a single TMetaPathDetail structure + Map surlRetStatusParam = Maps.newHashMap(); + // Member name "surl" + if (this.surl != null) + this.surl.encode(surlRetStatusParam, TSURL.PNAME_SURL); + if (this.returnStatus != null) + this.returnStatus.encode(surlRetStatusParam, TReturnStatus.PNAME_STATUS); + + outputVector.add(surlRetStatusParam); + + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("TSURLReturnStatus [surl="); + builder.append(surl); + builder.append(", returnStatus="); + builder.append(returnStatus); + builder.append("]"); + return builder.toString(); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = 1; + result = prime * result + ((returnStatus == null) ? 0 : returnStatus.hashCode()); + result = prime * result + ((surl == null) ? 
0 : surl.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + TSURLReturnStatus other = (TSURLReturnStatus) obj; + if (returnStatus == null) { + if (other.returnStatus != null) { + return false; + } + } else if (!returnStatus.equals(other.returnStatus)) { + return false; + } + if (surl == null) { + if (other.surl != null) { + return false; + } + } else if (!surl.equals(other.surl)) { + return false; + } + return true; + } } diff --git a/src/main/java/it/grid/storm/srm/types/TSpaceToken.java b/src/main/java/it/grid/storm/srm/types/TSpaceToken.java index b2b89c715..205544a3e 100644 --- a/src/main/java/it/grid/storm/srm/types/TSpaceToken.java +++ b/src/main/java/it/grid/storm/srm/types/TSpaceToken.java @@ -135,13 +135,13 @@ public final static TSpaceToken decode(Map param, String name) { * * @param vector */ - public void encode(List list) { + public void encode(List list) { list.add(this.toString()); } - public void encode(Map outputParam, String fieldName) { + public void encode(Map outputParam, String fieldName) { - outputParam.put(fieldName, (String) token); + outputParam.put(fieldName, token); } } diff --git a/src/main/java/it/grid/storm/srm/types/TUserPermission.java b/src/main/java/it/grid/storm/srm/types/TUserPermission.java index a209e9324..1b4076bdd 100644 --- a/src/main/java/it/grid/storm/srm/types/TUserPermission.java +++ b/src/main/java/it/grid/storm/srm/types/TUserPermission.java @@ -17,9 +17,10 @@ package it.grid.storm.srm.types; -import java.util.HashMap; import java.util.Map; +import com.google.common.collect.Maps; + /** * This class represents the TUserPermission in Srm request. 
* @@ -31,56 +32,56 @@ public class TUserPermission { - private TUserID userID; - private TPermissionMode permissionMode; + private TUserID userID; + private TPermissionMode permissionMode; - public static String PNAME_OWNERPERMISSION = "ownerPermission"; + public static String PNAME_OWNERPERMISSION = "ownerPermission"; - public TUserPermission(TUserID userID, TPermissionMode permMode) { + public TUserPermission(TUserID userID, TPermissionMode permMode) { - this.userID = userID; - this.permissionMode = permMode; - } + this.userID = userID; + this.permissionMode = permMode; + } - public static TUserPermission makeEmpty() { + public static TUserPermission makeEmpty() { - return new TUserPermission(TUserID.makeEmpty(), TPermissionMode.NONE); - } + return new TUserPermission(TUserID.makeEmpty(), TPermissionMode.NONE); + } - public TUserID getUserID() { + public TUserID getUserID() { - return userID; - } + return userID; + } - public TPermissionMode getPermissionMode() { + public TPermissionMode getPermissionMode() { - return permissionMode; - } + return permissionMode; + } - public static TUserPermission makeDirectoryDefault() { + public static TUserPermission makeDirectoryDefault() { - return new TUserPermission(TUserID.makeEmpty(), TPermissionMode.X); - } + return new TUserPermission(TUserID.makeEmpty(), TPermissionMode.X); + } - public static TUserPermission makeFileDefault() { + public static TUserPermission makeFileDefault() { - return new TUserPermission(TUserID.makeEmpty(), TPermissionMode.R); - } + return new TUserPermission(TUserID.makeEmpty(), TPermissionMode.R); + } - /** - * Encode method use to provide a represnetation of this object into a - * structures paramter for communication to FE component. - * - * @param param - * @param name - */ - public void encode(Map param, String name) { + /** + * Encode method use to provide a represnetation of this object into a structures paramter for + * communication to FE component. 
+ * + * @param param + * @param name + */ + public void encode(Map param, String name) { - Map paramStructure = new HashMap(); - if ((userID != null) && (permissionMode != null)) { - userID.encode(paramStructure, TUserID.PNAME_USERID); - permissionMode.encode(paramStructure, TPermissionMode.PNAME_MODE); - param.put(name, paramStructure); - } - } + Map paramStructure = Maps.newHashMap(); + if ((userID != null) && (permissionMode != null)) { + userID.encode(paramStructure, TUserID.PNAME_USERID); + permissionMode.encode(paramStructure, TPermissionMode.PNAME_MODE); + param.put(name, paramStructure); + } + } } diff --git a/src/main/java/it/grid/storm/synchcall/FileSystemUtility.java b/src/main/java/it/grid/storm/synchcall/FileSystemUtility.java index 5e3e9de87..f0b395986 100644 --- a/src/main/java/it/grid/storm/synchcall/FileSystemUtility.java +++ b/src/main/java/it/grid/storm/synchcall/FileSystemUtility.java @@ -21,50 +21,30 @@ import static it.grid.storm.metrics.StormMetricRegistry.METRIC_REGISTRY; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import it.grid.storm.filesystem.Filesystem; import it.grid.storm.filesystem.FilesystemIF; import it.grid.storm.filesystem.LocalFile; import it.grid.storm.filesystem.MetricsFilesystemAdapter; import it.grid.storm.filesystem.swig.genericfs; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.model.VirtualFS; -/** - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ public class FileSystemUtility { - private static Logger log = NamespaceDirector.getLogger(); + private static Logger log = LoggerFactory.getLogger(FileSystemUtility.class); public static LocalFile getLocalFileByAbsolutePath(String absolutePath) throws NamespaceException { LocalFile file = null; - VirtualFSInterface vfs = null; + VirtualFS vfs = null; genericfs fsDriver = null; FilesystemIF fs = null; try { - vfs = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath( - absolutePath); + vfs = Namespace.getInstance().resolveVFSbyAbsolutePath(absolutePath); } catch (NamespaceException ex) { log.error("Unable to retrieve VFS by Absolute Path", ex); } diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/CommandException.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/CommandException.java index 8b234920d..b1823bdf3 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/CommandException.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/CommandException.java @@ -36,15 +36,4 @@ public CommandException(String message) { super(message); } - - public CommandException(Throwable cause) { - - super(cause); - } - - public CommandException(String message, Throwable cause) { - - super(message, cause); - } - } diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/ExtendFileLifeTimeCommand.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/ExtendFileLifeTimeCommand.java index 2a94524bd..8b534b4f7 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/ExtendFileLifeTimeCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/ExtendFileLifeTimeCommand.java @@ -17,6 +17,15 @@ package it.grid.storm.synchcall.command.datatransfer; +import java.util.Calendar; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import 
org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.authz.AuthzException; import it.grid.storm.catalogs.VolatileAndJiTCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; @@ -24,9 +33,8 @@ import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; import it.grid.storm.srm.types.ArrayOfSURLs; @@ -50,585 +58,518 @@ import it.grid.storm.synchcall.surl.ExpiredTokenException; import it.grid.storm.synchcall.surl.UnknownTokenException; -import java.util.Calendar; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class is part of the StoRM project. Copyright (c) 2008 INFN-CNAF. - *

- * Authors: - * - * @author=lucamag luca.magnoniATcnaf.infn.it - * @author Alberto Forti - * @date = Oct 10, 2008 - */ -public class ExtendFileLifeTimeCommand extends DataTransferCommand implements - Command { - - private static final Logger log = LoggerFactory - .getLogger(ExtendFileLifeTimeCommand.class); - private static final String SRM_COMMAND = "srmExtendFileLifeTime"; - - public ExtendFileLifeTimeCommand() { - - }; - - /** - * Executes an srmExtendFileLifeTime(). - * - * @param inputData - * ExtendFileLifeTimeInputData - * @return ExtendFileLifeTimeOutputData - */ - - public OutputData execute(InputData data) { - - final String funcName = "ExtendFileLifeTime: "; - ExtendFileLifeTimeOutputData outputData = new ExtendFileLifeTimeOutputData(); - IdentityExtendFileLifeTimeInputData inputData; - if (data instanceof IdentityInputData) { - inputData = (IdentityExtendFileLifeTimeInputData) data; - } else { - outputData.setReturnStatus(CommandHelper.buildStatus( - TStatusCode.SRM_NOT_SUPPORTED, "Anonymous user can not perform" - + SRM_COMMAND)); - outputData.setArrayOfFileStatuses(null); - printRequestOutcome(outputData.getReturnStatus(), - (ExtendFileLifeTimeInputData) data); - return outputData; - } - - TReturnStatus globalStatus = null; - - ExtendFileLifeTimeCommand.log.debug(funcName + "Started."); - - /****************************** Check for malformed request ******************************/ - if (inputData.getArrayOfSURLs() == null) { - globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "Missing mandatory parameter 'arrayOfSURLs'"); - } else if (inputData.getArrayOfSURLs().size() < 1) { - globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "Parameter 'arrayOfSURLs': invalid size"); - } else if (!(inputData.getNewPinLifetime().isEmpty()) - && !(inputData.getNewFileLifetime().isEmpty()) - && (inputData.getRequestToken() != null)) { - globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "Cannot update both 
FileLifetime and PinLifetime"); - } else if (inputData.getNewPinLifetime().isEmpty() - && !(inputData.getNewFileLifetime().isEmpty()) - && (inputData.getRequestToken() != null)) { - globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "Do not specify the request token to update the FileLifetime"); - } else if (!(inputData.getNewPinLifetime().isEmpty()) - && !(inputData.getNewFileLifetime().isEmpty()) - && (inputData.getRequestToken() == null)) { - globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "Attempt to extend PinLifetime without request token"); - } else if (!(inputData.getNewPinLifetime().isEmpty()) - && inputData.getNewFileLifetime().isEmpty() - && (inputData.getRequestToken() == null)) { - globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "Attempt to extend PinLifetime without request token"); - } - - if (globalStatus != null) { - ExtendFileLifeTimeCommand.log.debug(funcName - + globalStatus.getExplanation()); - outputData.setReturnStatus(globalStatus); - outputData.setArrayOfFileStatuses(null); - printRequestOutcome(outputData.getReturnStatus(), inputData); - return outputData; - } - - /********************** Check user authentication and authorization ******************************/ - GridUserInterface user = inputData.getUser(); - if (user == null) { - ExtendFileLifeTimeCommand.log.debug(funcName + "The user field is NULL"); - outputData.setReturnStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHENTICATION_FAILURE, - "Unable to get user credential!")); - printRequestOutcome(outputData.getReturnStatus(), inputData); - outputData.setArrayOfFileStatuses(null); - return outputData; - } - - /********************************** Start to manage the request ***********************************/ - ArrayOfTSURLLifetimeReturnStatus arrayOfFileStatus = new ArrayOfTSURLLifetimeReturnStatus(); - - if ((inputData.getRequestToken() == null) - && (inputData.getNewPinLifetime().isEmpty())) { - log.debug(funcName + 
"Extending SURL lifetime..."); - globalStatus = manageExtendSURLLifetime(inputData.getNewFileLifetime(), - inputData.getArrayOfSURLs(), user, arrayOfFileStatus, - inputData.getRequestToken()); - } else { - log.debug(funcName + "Extending PIN lifetime..."); - try { - globalStatus = manageExtendPinLifetime(inputData.getRequestToken(), - inputData.getNewPinLifetime(), inputData.getArrayOfSURLs(), user, - arrayOfFileStatus); - } catch (IllegalArgumentException e) { - log.error(funcName + "Unexpected IllegalArgumentException: " - + e.getMessage()); - globalStatus = CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, "Request Failed, retry."); - outputData.setReturnStatus(globalStatus); - outputData.setArrayOfFileStatuses(null); - printRequestOutcome(outputData.getReturnStatus(), inputData); - return outputData; - } - } - - outputData.setReturnStatus(globalStatus); - outputData.setArrayOfFileStatuses(arrayOfFileStatus); - printRequestOutcome(outputData.getReturnStatus(), inputData); - log.debug(funcName + "Finished."); - - return outputData; - } - - /** - * Extend the lifetime of a SURL. The parameter details is filled by this - * method and contains file level information on the execution of the request. - * - * @param newLifetime - * TLifeTimeInSeconds. - * @param arrayOfSURLS - * ArrayOfSURLs. - * @param guser - * VomsGridUser. - * @param arrayOfFileLifetimeStatus - * . ArrayOfTSURLLifetimeReturnStatus The returned file level - * information. - * @return TReturnStatus. The request status. 
- */ - private TReturnStatus manageExtendSURLLifetime( - TLifeTimeInSeconds newLifetime, ArrayOfSURLs arrayOfSURLS, - GridUserInterface guser, ArrayOfTSURLLifetimeReturnStatus details, - TRequestToken requestToken) { - - if (details == null) { - ExtendFileLifeTimeCommand.log - .debug("Function manageExtendSURLLifetime, class ExtendFileLifeTimeExecutor: parameter details is NULL"); - } - NamespaceInterface namespace = NamespaceDirector.getNamespace(); - VolatileAndJiTCatalog catalog = VolatileAndJiTCatalog.getInstance(); - boolean requestSuccess = true; - boolean requestFailure = true; - - // For each requested SURL, try to extend its lifetime. - for (int i = 0; i < arrayOfSURLS.size(); i++) { - TSURL surl = arrayOfSURLS.getTSURL(i); - StoRI stori = null; - TStatusCode fileStatusCode; - String fileStatusExplanation; - try { - try { - stori = namespace.resolveStoRIbySURL(surl, guser); - } catch (IllegalArgumentException e) { - ExtendFileLifeTimeCommand.log.error( - "Unable to build StoRI by SURL and user", e); - fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; - fileStatusExplanation = "Unable to build StoRI by SURL and user"; - } catch (UnapprochableSurlException e) { - log.info("Unable to build a stori for surl " + surl + " for user " - + guser + " UnapprochableSurlException: " + e.getMessage()); - fileStatusCode = TStatusCode.SRM_AUTHORIZATION_FAILURE; - fileStatusExplanation = e.getMessage(); - } catch (NamespaceException e) { - log.info("Unable to build a stori for surl " + surl + " for user " - + guser + " NamespaceException: " + e.getMessage()); - fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; - fileStatusExplanation = e.getMessage(); - } catch (InvalidSURLException e) { - log.info("Unable to build a stori for surl " + surl + " for user " - + guser + " InvalidSURLException: " + e.getMessage()); - fileStatusCode = TStatusCode.SRM_INVALID_PATH; - fileStatusExplanation = e.getMessage(); - } - if (stori != null) { - LocalFile localFile = stori.getLocalFile(); - 
if (localFile.exists()) { - ExtendFileLifeTimeCommand.log.debug(stori.getPFN().toString()); - List volatileInfo = catalog.volatileInfoOn(stori.getPFN()); - if (volatileInfo.isEmpty()) { - fileStatusCode = TStatusCode.SRM_SUCCESS; - fileStatusExplanation = "Nothing to do, SURL is permanent"; - newLifetime = TLifeTimeInSeconds.makeInfinite(); - requestFailure = false; - } else if (volatileInfo.size() > 2) { - fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; - fileStatusExplanation = "Found more than one entry.... that's a BUG."; - // For lifetimes infinite means also unknown - newLifetime = TLifeTimeInSeconds.makeInfinite(); - requestSuccess = false; - } else if (isStoRISURLBusy(stori)) { - fileStatusCode = TStatusCode.SRM_FILE_BUSY; - fileStatusExplanation = "File status is SRM_SPACE_AVAILABLE. SURL lifetime cannot be extend (try with PIN lifetime)"; - // For lifetimes infinite means also unknown - newLifetime = TLifeTimeInSeconds.makeInfinite(); - requestSuccess = false; - } else { // Ok, extend the lifetime of the SURL - // Update the DB with the new lifetime - catalog.trackVolatile(stori.getPFN(), - (Calendar) volatileInfo.get(0), newLifetime); - // TODO: return the correct lifetime, i.e. the one which is - // written to the DB. - // TLifeTimeInSeconds writtenLifetime = (TLifeTimeInSeconds) - // volatileInfo.get(1); - - fileStatusCode = TStatusCode.SRM_SUCCESS; - fileStatusExplanation = "Lifetime extended"; - requestFailure = false; - } - } else { // Requested SURL does not exists in the filesystem - fileStatusCode = TStatusCode.SRM_INVALID_PATH; - fileStatusExplanation = "File does not exist"; - requestSuccess = false; - } - - // Set the file level information to be returned. 
- TReturnStatus fileStatus = new TReturnStatus(fileStatusCode, - fileStatusExplanation); - if (fileStatus.getStatusCode().equals(TStatusCode.SRM_SUCCESS)) { - ExtendFileLifeTimeCommand.log.info("srmExtendFileLifeTime: <" - + guser + "> Request for [token:" + requestToken + "] for [SURL:" - + surl + "] with [lifetime:" + newLifetime - + " ] successfully done with: [status:" + fileStatus + "]"); - } else { - ExtendFileLifeTimeCommand.log.error("srmExtendFileLifeTime: <" - + guser + "> Request for [token:" + requestToken + "] for [SURL:" - + surl + "] with [lifetime:" + newLifetime - + "] failed with: [status:" + fileStatus + "]"); - } - TSURLLifetimeReturnStatus lifetimeReturnStatus = new TSURLLifetimeReturnStatus( - surl, fileStatus, newLifetime, null); - details.addTSurlReturnStatus(lifetimeReturnStatus); - } - } catch (InvalidTSURLLifetimeReturnStatusAttributeException e3) { - ExtendFileLifeTimeCommand.log - .debug("Thrown InvalidTSURLLifetimeReturnStatusAttributeException"); - } - } - TReturnStatus globalStatus = null; - // Set global status - if (requestFailure) { - globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, - "All file requests are failed"); - } else if (requestSuccess) { - globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, - "All file requests are successfully completed"); - } else { - globalStatus = new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, - "Details are on the file statuses"); - } - return globalStatus; - } - - /** - * Returns true if the status of the SURL of the received StoRI is - * SRM_SPACE_AVAILABLE, false otherwise. This method queries the DB, therefore - * pay attention to possible performance issues. - * - * @return boolean - */ - private boolean isStoRISURLBusy(StoRI element) { - - SURLStatusManager checker = SURLStatusManagerFactory - .newSURLStatusManager(); - - return checker.isSURLBusy(element.getSURL()); - } - - /** - * Extend the PIN lifetime of a SURL. 
The parameter details is filled by this - * method and contains file level information on the execution of the request. - * - * @param requestToken - * TRequestToken. - * @param newPINLifetime - * TLifeTimeInSeconds. - * @param arrayOfSURLS - * ArrayOfSURLs. - * @param guser - * VomsGridUser. - * @param details - * ArrayOfTSURLLifetimeReturnStatus. - * @return TReturnStatus. The request status. - * @throws UnknownTokenException - * @throws IllegalArgumentException - */ - private TReturnStatus manageExtendPinLifetime(TRequestToken requestToken, - TLifeTimeInSeconds newPINLifetime, ArrayOfSURLs arrayOfSURLS, - GridUserInterface guser, ArrayOfTSURLLifetimeReturnStatus details) - throws IllegalArgumentException { - - if (details == null) { - ExtendFileLifeTimeCommand.log - .debug("Function manageExtendSURLLifetime, class ExtendFileLifeTimeExecutor: parameter details is NULL"); - } - TReturnStatus globalStatus = null; - List requestSURLsList; - try { - requestSURLsList = getListOfSURLsInTheRequest(guser, requestToken); - } catch (UnknownTokenException e4) { - return CommandHelper.buildStatus(TStatusCode.SRM_INVALID_REQUEST, - "Invalid request token"); - } catch (ExpiredTokenException e) { - return CommandHelper.buildStatus(TStatusCode.SRM_REQUEST_TIMED_OUT, - "Request expired"); - } catch (AuthzException e) { - return CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, - e.getMessage()); - } - if (requestSURLsList.isEmpty()) { - return CommandHelper.buildStatus(TStatusCode.SRM_INVALID_REQUEST, - "Invalid request token"); - } - // Once we have the list of SURLs belonging to the request, we must check - // that the SURLs given by the user are consistent, that the resulting - // lifetime could be lower than the one requested (and for this we must read - // the Volatile table of the DB), that the SURLs are not released, aborted, - // expired or suspended and so on... therefore the purpose of all that stuff - // is to return the right information. 
I mean, no PIN lifetime is - // effectively extend, in StoRM the TURL corresponds to the SURL. - boolean requestSuccess = true; - boolean requestFailure = true; - TLifeTimeInSeconds PINLifetime; - TLifeTimeInSeconds dbLifetime = null; - for (int i = 0; i < arrayOfSURLS.size(); i++) { - TSURL surl = arrayOfSURLS.getTSURL(i); - TStatusCode statusOfTheSURL = null; - TStatusCode fileStatusCode; - String fileStatusExplanation; - boolean surlFound = false; - // Check if the current SURL belongs to the request token - for (int j = 0; j < requestSURLsList.size(); j++) { - SURLData surlData = (SURLData) requestSURLsList.get(j); - if (surl.equals(surlData.surl)) { - statusOfTheSURL = surlData.statusCode; - requestSURLsList.remove(j); - surlFound = true; - break; - } - } - try { - if (surlFound) { - ExtendFileLifeTimeCommand.log.debug("Found SURL: " - + surl.getSURLString() + " (status: " + statusOfTheSURL.toString() - + ")"); - NamespaceInterface namespace = NamespaceDirector.getNamespace(); - StoRI stori = null; - try { - stori = namespace.resolveStoRIbySURL(surl, guser); - } catch (IllegalArgumentException e) { - log.error("Unable to build StoRI by SURL and user", e); - } catch (Exception e) { - log.info(String.format( - "Unable to build a stori for surl %s for user %s, %s: %s", surl, - guser, e.getClass().getCanonicalName(), e.getMessage())); - } - if (stori != null) { - LocalFile localFile = stori.getLocalFile(); - if (localFile.exists()) { - VolatileAndJiTCatalog catalog = VolatileAndJiTCatalog - .getInstance(); - List volatileInfo = catalog.volatileInfoOn(stori.getPFN()); - - if ((statusOfTheSURL != TStatusCode.SRM_FILE_PINNED) - && (statusOfTheSURL != TStatusCode.SRM_SPACE_AVAILABLE) - && (statusOfTheSURL != TStatusCode.SRM_SUCCESS)) - { - fileStatusCode = TStatusCode.SRM_INVALID_REQUEST; - fileStatusExplanation = "No TURL available"; - PINLifetime = null; - requestSuccess = false; - } else if (volatileInfo.size() > 2) { - fileStatusCode = 
TStatusCode.SRM_INTERNAL_ERROR; - fileStatusExplanation = "Found more than one entry.... that's a BUG."; - // For lifetimes infinite means also unknown - PINLifetime = TLifeTimeInSeconds.makeInfinite(); - requestSuccess = false; - } else { // OK, extend the PIN lifetime. - // If the status is success the extension will not take place, - // only in case of empty parameter the current value are - // returned, otherwaise the request must - // fail! - - if ((statusOfTheSURL == TStatusCode.SRM_SUCCESS) - && (!newPINLifetime.isEmpty())) { - - fileStatusCode = TStatusCode.SRM_INVALID_REQUEST; - fileStatusExplanation = "No TURL available"; - PINLifetime = null; - requestSuccess = false; - - } else { - - fileStatusCode = TStatusCode.SRM_SUCCESS; - - if (volatileInfo.isEmpty()) { // SURL is permanent - dbLifetime = TLifeTimeInSeconds.makeInfinite(); - } else { - dbLifetime = (TLifeTimeInSeconds) volatileInfo.get(1); - } - if ((!dbLifetime.isInfinite()) - && (newPINLifetime.value() > dbLifetime.value())) { - PINLifetime = dbLifetime; - fileStatusExplanation = "The requested PIN lifetime is greater than the lifetime of the SURL." 
- + " PIN lifetime is now equal to the lifetime of the SURL."; - } else { - PINLifetime = newPINLifetime; - fileStatusExplanation = "Lifetime extended"; - } - ExtendFileLifeTimeCommand.log.debug("New PIN lifetime is: " - + PINLifetime.value() + "(SURL: " + surl.getSURLString() - + ")"); - // TODO: update the RequestSummaryCatalog with the new - // pinLifetime - // it is better to do it only once after the for loop - requestFailure = false; - } - } - } else { // file does not exist in the file system - fileStatusCode = TStatusCode.SRM_INVALID_PATH; - fileStatusExplanation = "Invalid path"; - PINLifetime = null; - requestSuccess = false; - - } - } else { - log.error("Unable to build StoRI by SURL and user"); - fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; - fileStatusExplanation = "Unable to build StoRI by SURL and user"; - // For lifetimes infinite means also unknown - PINLifetime = null; - requestSuccess = false; - } - } else { // SURL not found in the DB - ExtendFileLifeTimeCommand.log.debug("SURL: " + surl.getSURLString() - + " NOT FOUND!"); - fileStatusCode = TStatusCode.SRM_INVALID_PATH; - fileStatusExplanation = "SURL not found in the request"; - PINLifetime = null; - requestSuccess = false; - } - // Set the file level information to be returned. 
- TReturnStatus fileStatus = new TReturnStatus(fileStatusCode, - fileStatusExplanation); - if (fileStatus.getStatusCode().equals(TStatusCode.SRM_SUCCESS)) { - ExtendFileLifeTimeCommand.log.info("srmExtendFileLifeTime: <" + guser - + "> Request for [token:" + requestToken + "] for [SURL:" + surl - + "] with [pinlifetime: " + newPINLifetime - + "] successfully done with: [status:" + fileStatus.toString() - + "]"); - } else { - ExtendFileLifeTimeCommand.log.error("srmExtendFileLifeTime: <" - + guser + "> Request for [token:" + requestToken + "] for [SURL:" - + surl + "] with [pinlifetime: " + newPINLifetime - + "] failed with: [status:" + fileStatus.toString() + "]"); - } - - TSURLLifetimeReturnStatus lifetimeReturnStatus = new TSURLLifetimeReturnStatus( - surl, fileStatus, dbLifetime, PINLifetime); - details.addTSurlReturnStatus(lifetimeReturnStatus); - } catch (InvalidTSURLLifetimeReturnStatusAttributeException e3) { - ExtendFileLifeTimeCommand.log - .debug("Thrown InvalidTSURLLifetimeReturnStatusAttributeException"); - } - } - - // Set global status - if (requestFailure) { - globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, - "All file requests are failed"); - } else if (requestSuccess) { - globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, - "All file requests are successfully completed"); - } else { - globalStatus = new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, - "Details are on the file statuses"); - } - return globalStatus; - } - - /** - * Returns the list of SURLs and statuses (a List of SURLData) belonging to - * the request identified by the requestToken. 
- * - * @param requestToken - * TRequestToken - * @return List - * @throws UnknownTokenException - * @throws IllegalArgumentException - * @throws ExpiredTokenException - */ - private List getListOfSURLsInTheRequest(GridUserInterface user, - TRequestToken requestToken) - throws IllegalArgumentException, UnknownTokenException, - ExpiredTokenException { - - List listOfSURLsInfo = new LinkedList(); - - SURLStatusManager checker = SURLStatusManagerFactory - .newSURLStatusManager(); - - Map surlStatusMap = - checker.getSURLStatuses(user, requestToken); - - if (!(surlStatusMap == null || surlStatusMap.isEmpty())) { - for (Entry surlStatus : surlStatusMap.entrySet()) { - listOfSURLsInfo.add(new SURLData(surlStatus.getKey(), surlStatus - .getValue().getStatusCode())); - } - } - return listOfSURLsInfo; - } - - private void printRequestOutcome(TReturnStatus status, - ExtendFileLifeTimeInputData inputData) { - - if (inputData != null) { - if (inputData.getArrayOfSURLs() != null) { - if (inputData.getRequestToken() != null) { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, - inputData, inputData.getRequestToken(), inputData.getArrayOfSURLs() - .asStringList()); - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, - inputData, inputData.getArrayOfSURLs().asStringList()); - } - - } else { - if (inputData.getRequestToken() != null) { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, - inputData, inputData.getRequestToken()); - } else { - CommandHelper - .printRequestOutcome(SRM_COMMAND, log, status, inputData); - } - } - - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); - } - } - - private class SURLData { - - public TSURL surl; - public TStatusCode statusCode; - - public SURLData(TSURL surl, TStatusCode statusCode) { - - this.surl = surl; - this.statusCode = statusCode; - } - } +public class ExtendFileLifeTimeCommand extends DataTransferCommand implements Command { + + private static final Logger log = 
LoggerFactory.getLogger(ExtendFileLifeTimeCommand.class); + private static final String SRM_COMMAND = "srmExtendFileLifeTime"; + + public ExtendFileLifeTimeCommand() { + + }; + + /** + * Executes an srmExtendFileLifeTime(). + * + * @param inputData ExtendFileLifeTimeInputData + * @return ExtendFileLifeTimeOutputData + */ + + public OutputData execute(InputData data) { + + final String funcName = "ExtendFileLifeTime: "; + ExtendFileLifeTimeOutputData outputData = new ExtendFileLifeTimeOutputData(); + IdentityExtendFileLifeTimeInputData inputData; + if (data instanceof IdentityInputData) { + inputData = (IdentityExtendFileLifeTimeInputData) data; + } else { + outputData.setReturnStatus(CommandHelper.buildStatus(TStatusCode.SRM_NOT_SUPPORTED, + "Anonymous user can not perform" + SRM_COMMAND)); + outputData.setArrayOfFileStatuses(null); + printRequestOutcome(outputData.getReturnStatus(), (ExtendFileLifeTimeInputData) data); + return outputData; + } + + TReturnStatus globalStatus = null; + + ExtendFileLifeTimeCommand.log.debug(funcName + "Started."); + + /****************************** Check for malformed request ******************************/ + if (inputData.getArrayOfSURLs() == null) { + globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "Missing mandatory parameter 'arrayOfSURLs'"); + } else if (inputData.getArrayOfSURLs().size() < 1) { + globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "Parameter 'arrayOfSURLs': invalid size"); + } else if (!(inputData.getNewPinLifetime().isEmpty()) + && !(inputData.getNewFileLifetime().isEmpty()) && (inputData.getRequestToken() != null)) { + globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "Cannot update both FileLifetime and PinLifetime"); + } else if (inputData.getNewPinLifetime().isEmpty() + && !(inputData.getNewFileLifetime().isEmpty()) && (inputData.getRequestToken() != null)) { + globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "Do not specify the 
request token to update the FileLifetime"); + } else if (!(inputData.getNewPinLifetime().isEmpty()) + && !(inputData.getNewFileLifetime().isEmpty()) && (inputData.getRequestToken() == null)) { + globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "Attempt to extend PinLifetime without request token"); + } else if (!(inputData.getNewPinLifetime().isEmpty()) + && inputData.getNewFileLifetime().isEmpty() && (inputData.getRequestToken() == null)) { + globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "Attempt to extend PinLifetime without request token"); + } + + if (globalStatus != null) { + ExtendFileLifeTimeCommand.log.debug(funcName + globalStatus.getExplanation()); + outputData.setReturnStatus(globalStatus); + outputData.setArrayOfFileStatuses(null); + printRequestOutcome(outputData.getReturnStatus(), inputData); + return outputData; + } + + /********************** + * Check user authentication and authorization + ******************************/ + GridUserInterface user = inputData.getUser(); + if (user == null) { + ExtendFileLifeTimeCommand.log.debug(funcName + "The user field is NULL"); + outputData.setReturnStatus(CommandHelper.buildStatus(TStatusCode.SRM_AUTHENTICATION_FAILURE, + "Unable to get user credential!")); + printRequestOutcome(outputData.getReturnStatus(), inputData); + outputData.setArrayOfFileStatuses(null); + return outputData; + } + + /********************************** + * Start to manage the request + ***********************************/ + ArrayOfTSURLLifetimeReturnStatus arrayOfFileStatus = new ArrayOfTSURLLifetimeReturnStatus(); + + if ((inputData.getRequestToken() == null) && (inputData.getNewPinLifetime().isEmpty())) { + log.debug(funcName + "Extending SURL lifetime..."); + globalStatus = manageExtendSURLLifetime(inputData.getNewFileLifetime(), + inputData.getArrayOfSURLs(), user, arrayOfFileStatus, inputData.getRequestToken()); + } else { + log.debug(funcName + "Extending PIN lifetime..."); + try { + 
globalStatus = manageExtendPinLifetime(inputData.getRequestToken(), + inputData.getNewPinLifetime(), inputData.getArrayOfSURLs(), user, arrayOfFileStatus); + } catch (IllegalArgumentException e) { + log.error(funcName + "Unexpected IllegalArgumentException: " + e.getMessage()); + globalStatus = + CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, "Request Failed, retry."); + outputData.setReturnStatus(globalStatus); + outputData.setArrayOfFileStatuses(null); + printRequestOutcome(outputData.getReturnStatus(), inputData); + return outputData; + } + } + + outputData.setReturnStatus(globalStatus); + outputData.setArrayOfFileStatuses(arrayOfFileStatus); + printRequestOutcome(outputData.getReturnStatus(), inputData); + log.debug(funcName + "Finished."); + + return outputData; + } + + /** + * Extend the lifetime of a SURL. The parameter details is filled by this method and contains file + * level information on the execution of the request. + * + * @param newLifetime TLifeTimeInSeconds. + * @param arrayOfSURLS ArrayOfSURLs. + * @param guser VomsGridUser. + * @param arrayOfFileLifetimeStatus . ArrayOfTSURLLifetimeReturnStatus The returned file level + * information. + * @return TReturnStatus. The request status. + */ + private TReturnStatus manageExtendSURLLifetime(TLifeTimeInSeconds newLifetime, + ArrayOfSURLs arrayOfSURLS, GridUserInterface guser, ArrayOfTSURLLifetimeReturnStatus details, + TRequestToken requestToken) { + + if (details == null) { + ExtendFileLifeTimeCommand.log.debug( + "Function manageExtendSURLLifetime, class ExtendFileLifeTimeExecutor: parameter details is NULL"); + } + Namespace namespace = Namespace.getInstance(); + VolatileAndJiTCatalog catalog = VolatileAndJiTCatalog.getInstance(); + boolean requestSuccess = true; + boolean requestFailure = true; + + // For each requested SURL, try to extend its lifetime. 
+ for (int i = 0; i < arrayOfSURLS.size(); i++) { + TSURL surl = arrayOfSURLS.getTSURL(i); + StoRI stori = null; + TStatusCode fileStatusCode; + String fileStatusExplanation; + try { + try { + stori = namespace.resolveStoRIbySURL(surl, guser); + } catch (IllegalArgumentException e) { + ExtendFileLifeTimeCommand.log.error("Unable to build StoRI by SURL and user", e); + fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; + fileStatusExplanation = "Unable to build StoRI by SURL and user"; + } catch (UnapprochableSurlException e) { + log.info("Unable to build a stori for surl " + surl + " for user " + guser + + " UnapprochableSurlException: " + e.getMessage()); + fileStatusCode = TStatusCode.SRM_AUTHORIZATION_FAILURE; + fileStatusExplanation = e.getMessage(); + } catch (NamespaceException e) { + log.info("Unable to build a stori for surl " + surl + " for user " + guser + + " NamespaceException: " + e.getMessage()); + fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; + fileStatusExplanation = e.getMessage(); + } catch (InvalidSURLException e) { + log.info("Unable to build a stori for surl " + surl + " for user " + guser + + " InvalidSURLException: " + e.getMessage()); + fileStatusCode = TStatusCode.SRM_INVALID_PATH; + fileStatusExplanation = e.getMessage(); + } + if (stori != null) { + LocalFile localFile = stori.getLocalFile(); + if (localFile.exists()) { + ExtendFileLifeTimeCommand.log.debug(stori.getPFN().toString()); + List volatileInfo = catalog.volatileInfoOn(stori.getPFN()); + if (volatileInfo.isEmpty()) { + fileStatusCode = TStatusCode.SRM_SUCCESS; + fileStatusExplanation = "Nothing to do, SURL is permanent"; + newLifetime = TLifeTimeInSeconds.makeInfinite(); + requestFailure = false; + } else if (volatileInfo.size() > 2) { + fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; + fileStatusExplanation = "Found more than one entry.... 
that's a BUG."; + // For lifetimes infinite means also unknown + newLifetime = TLifeTimeInSeconds.makeInfinite(); + requestSuccess = false; + } else if (isStoRISURLBusy(stori)) { + fileStatusCode = TStatusCode.SRM_FILE_BUSY; + fileStatusExplanation = + "File status is SRM_SPACE_AVAILABLE. SURL lifetime cannot be extend (try with PIN lifetime)"; + // For lifetimes infinite means also unknown + newLifetime = TLifeTimeInSeconds.makeInfinite(); + requestSuccess = false; + } else { // Ok, extend the lifetime of the SURL + // Update the DB with the new lifetime + catalog.trackVolatile(stori.getPFN(), (Calendar) volatileInfo.get(0), newLifetime); + // TODO: return the correct lifetime, i.e. the one which is + // written to the DB. + // TLifeTimeInSeconds writtenLifetime = (TLifeTimeInSeconds) + // volatileInfo.get(1); + + fileStatusCode = TStatusCode.SRM_SUCCESS; + fileStatusExplanation = "Lifetime extended"; + requestFailure = false; + } + } else { // Requested SURL does not exists in the filesystem + fileStatusCode = TStatusCode.SRM_INVALID_PATH; + fileStatusExplanation = "File does not exist"; + requestSuccess = false; + } + + // Set the file level information to be returned. 
+ TReturnStatus fileStatus = new TReturnStatus(fileStatusCode, fileStatusExplanation); + if (fileStatus.getStatusCode().equals(TStatusCode.SRM_SUCCESS)) { + ExtendFileLifeTimeCommand.log + .info("srmExtendFileLifeTime: <" + guser + "> Request for [token:" + requestToken + + "] for [SURL:" + surl + "] with [lifetime:" + newLifetime + + " ] successfully done with: [status:" + fileStatus + "]"); + } else { + ExtendFileLifeTimeCommand.log.error("srmExtendFileLifeTime: <" + guser + + "> Request for [token:" + requestToken + "] for [SURL:" + surl + + "] with [lifetime:" + newLifetime + "] failed with: [status:" + fileStatus + "]"); + } + TSURLLifetimeReturnStatus lifetimeReturnStatus = + new TSURLLifetimeReturnStatus(surl, fileStatus, newLifetime, null); + details.addTSurlReturnStatus(lifetimeReturnStatus); + } + } catch (InvalidTSURLLifetimeReturnStatusAttributeException e3) { + ExtendFileLifeTimeCommand.log + .debug("Thrown InvalidTSURLLifetimeReturnStatusAttributeException"); + } + } + TReturnStatus globalStatus = null; + // Set global status + if (requestFailure) { + globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, "All file requests are failed"); + } else if (requestSuccess) { + globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, + "All file requests are successfully completed"); + } else { + globalStatus = + new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, "Details are on the file statuses"); + } + return globalStatus; + } + + /** + * Returns true if the status of the SURL of the received StoRI is SRM_SPACE_AVAILABLE, false + * otherwise. This method queries the DB, therefore pay attention to possible performance issues. + * + * @return boolean + */ + private boolean isStoRISURLBusy(StoRI element) { + + SURLStatusManager checker = SURLStatusManagerFactory.newSURLStatusManager(); + + return checker.isSURLBusy(element.getSURL()); + } + + /** + * Extend the PIN lifetime of a SURL. 
The parameter details is filled by this method and contains + * file level information on the execution of the request. + * + * @param requestToken TRequestToken. + * @param newPINLifetime TLifeTimeInSeconds. + * @param arrayOfSURLS ArrayOfSURLs. + * @param guser VomsGridUser. + * @param details ArrayOfTSURLLifetimeReturnStatus. + * @return TReturnStatus. The request status. + * @throws UnknownTokenException + * @throws IllegalArgumentException + */ + private TReturnStatus manageExtendPinLifetime(TRequestToken requestToken, + TLifeTimeInSeconds newPINLifetime, ArrayOfSURLs arrayOfSURLS, GridUserInterface guser, + ArrayOfTSURLLifetimeReturnStatus details) throws IllegalArgumentException { + + if (details == null) { + ExtendFileLifeTimeCommand.log.debug( + "Function manageExtendSURLLifetime, class ExtendFileLifeTimeExecutor: parameter details is NULL"); + } + TReturnStatus globalStatus = null; + List requestSURLsList; + try { + requestSURLsList = getListOfSURLsInTheRequest(guser, requestToken); + } catch (UnknownTokenException e4) { + return CommandHelper.buildStatus(TStatusCode.SRM_INVALID_REQUEST, "Invalid request token"); + } catch (ExpiredTokenException e) { + return CommandHelper.buildStatus(TStatusCode.SRM_REQUEST_TIMED_OUT, "Request expired"); + } catch (AuthzException e) { + return CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage()); + } + if (requestSURLsList.isEmpty()) { + return CommandHelper.buildStatus(TStatusCode.SRM_INVALID_REQUEST, "Invalid request token"); + } + // Once we have the list of SURLs belonging to the request, we must check + // that the SURLs given by the user are consistent, that the resulting + // lifetime could be lower than the one requested (and for this we must read + // the Volatile table of the DB), that the SURLs are not released, aborted, + // expired or suspended and so on... therefore the purpose of all that stuff + // is to return the right information. 
I mean, no PIN lifetime is + // effectively extend, in StoRM the TURL corresponds to the SURL. + boolean requestSuccess = true; + boolean requestFailure = true; + TLifeTimeInSeconds PINLifetime; + TLifeTimeInSeconds dbLifetime = null; + for (int i = 0; i < arrayOfSURLS.size(); i++) { + TSURL surl = arrayOfSURLS.getTSURL(i); + TStatusCode statusOfTheSURL = null; + TStatusCode fileStatusCode; + String fileStatusExplanation; + boolean surlFound = false; + // Check if the current SURL belongs to the request token + for (int j = 0; j < requestSURLsList.size(); j++) { + SURLData surlData = (SURLData) requestSURLsList.get(j); + if (surl.equals(surlData.surl)) { + statusOfTheSURL = surlData.statusCode; + requestSURLsList.remove(j); + surlFound = true; + break; + } + } + try { + if (surlFound) { + ExtendFileLifeTimeCommand.log.debug("Found SURL: " + surl.getSURLString() + " (status: " + + statusOfTheSURL.toString() + ")"); + Namespace namespace = Namespace.getInstance(); + StoRI stori = null; + try { + stori = namespace.resolveStoRIbySURL(surl, guser); + } catch (IllegalArgumentException e) { + log.error("Unable to build StoRI by SURL and user", e); + } catch (Exception e) { + log.info(String.format("Unable to build a stori for surl %s for user %s, %s: %s", surl, + guser, e.getClass().getCanonicalName(), e.getMessage())); + } + if (stori != null) { + LocalFile localFile = stori.getLocalFile(); + if (localFile.exists()) { + VolatileAndJiTCatalog catalog = VolatileAndJiTCatalog.getInstance(); + List volatileInfo = catalog.volatileInfoOn(stori.getPFN()); + + if ((statusOfTheSURL != TStatusCode.SRM_FILE_PINNED) + && (statusOfTheSURL != TStatusCode.SRM_SPACE_AVAILABLE) + && (statusOfTheSURL != TStatusCode.SRM_SUCCESS)) { + fileStatusCode = TStatusCode.SRM_INVALID_REQUEST; + fileStatusExplanation = "No TURL available"; + PINLifetime = null; + requestSuccess = false; + } else if (volatileInfo.size() > 2) { + fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; + fileStatusExplanation 
= "Found more than one entry.... that's a BUG."; + // For lifetimes infinite means also unknown + PINLifetime = TLifeTimeInSeconds.makeInfinite(); + requestSuccess = false; + } else { // OK, extend the PIN lifetime. + // If the status is success the extension will not take place, + // only in case of empty parameter the current value are + // returned, otherwaise the request must + // fail! + + if ((statusOfTheSURL == TStatusCode.SRM_SUCCESS) && (!newPINLifetime.isEmpty())) { + + fileStatusCode = TStatusCode.SRM_INVALID_REQUEST; + fileStatusExplanation = "No TURL available"; + PINLifetime = null; + requestSuccess = false; + + } else { + + fileStatusCode = TStatusCode.SRM_SUCCESS; + + if (volatileInfo.isEmpty()) { // SURL is permanent + dbLifetime = TLifeTimeInSeconds.makeInfinite(); + } else { + dbLifetime = (TLifeTimeInSeconds) volatileInfo.get(1); + } + if ((!dbLifetime.isInfinite()) && (newPINLifetime.value() > dbLifetime.value())) { + PINLifetime = dbLifetime; + fileStatusExplanation = + "The requested PIN lifetime is greater than the lifetime of the SURL." 
+ + " PIN lifetime is now equal to the lifetime of the SURL."; + } else { + PINLifetime = newPINLifetime; + fileStatusExplanation = "Lifetime extended"; + } + ExtendFileLifeTimeCommand.log.debug("New PIN lifetime is: " + PINLifetime.value() + + "(SURL: " + surl.getSURLString() + ")"); + // TODO: update the RequestSummaryCatalog with the new + // pinLifetime + // it is better to do it only once after the for loop + requestFailure = false; + } + } + } else { // file does not exist in the file system + fileStatusCode = TStatusCode.SRM_INVALID_PATH; + fileStatusExplanation = "Invalid path"; + PINLifetime = null; + requestSuccess = false; + + } + } else { + log.error("Unable to build StoRI by SURL and user"); + fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; + fileStatusExplanation = "Unable to build StoRI by SURL and user"; + // For lifetimes infinite means also unknown + PINLifetime = null; + requestSuccess = false; + } + } else { // SURL not found in the DB + ExtendFileLifeTimeCommand.log.debug("SURL: " + surl.getSURLString() + " NOT FOUND!"); + fileStatusCode = TStatusCode.SRM_INVALID_PATH; + fileStatusExplanation = "SURL not found in the request"; + PINLifetime = null; + requestSuccess = false; + } + // Set the file level information to be returned. 
+ TReturnStatus fileStatus = new TReturnStatus(fileStatusCode, fileStatusExplanation); + if (fileStatus.getStatusCode().equals(TStatusCode.SRM_SUCCESS)) { + ExtendFileLifeTimeCommand.log + .info("srmExtendFileLifeTime: <" + guser + "> Request for [token:" + requestToken + + "] for [SURL:" + surl + "] with [pinlifetime: " + newPINLifetime + + "] successfully done with: [status:" + fileStatus.toString() + "]"); + } else { + ExtendFileLifeTimeCommand.log + .error("srmExtendFileLifeTime: <" + guser + "> Request for [token:" + requestToken + + "] for [SURL:" + surl + "] with [pinlifetime: " + newPINLifetime + + "] failed with: [status:" + fileStatus.toString() + "]"); + } + + TSURLLifetimeReturnStatus lifetimeReturnStatus = + new TSURLLifetimeReturnStatus(surl, fileStatus, dbLifetime, PINLifetime); + details.addTSurlReturnStatus(lifetimeReturnStatus); + } catch (InvalidTSURLLifetimeReturnStatusAttributeException e3) { + ExtendFileLifeTimeCommand.log + .debug("Thrown InvalidTSURLLifetimeReturnStatusAttributeException"); + } + } + + // Set global status + if (requestFailure) { + globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, "All file requests are failed"); + } else if (requestSuccess) { + globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, + "All file requests are successfully completed"); + } else { + globalStatus = + new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, "Details are on the file statuses"); + } + return globalStatus; + } + + /** + * Returns the list of SURLs and statuses (a List of SURLData) belonging to the request identified + * by the requestToken. 
+ * + * @param requestToken TRequestToken + * @return List + * @throws UnknownTokenException + * @throws IllegalArgumentException + * @throws ExpiredTokenException + */ + private List getListOfSURLsInTheRequest(GridUserInterface user, + TRequestToken requestToken) + throws IllegalArgumentException, UnknownTokenException, ExpiredTokenException { + + List listOfSURLsInfo = new LinkedList(); + + SURLStatusManager checker = SURLStatusManagerFactory.newSURLStatusManager(); + + Map surlStatusMap = checker.getSURLStatuses(user, requestToken); + + if (!(surlStatusMap == null || surlStatusMap.isEmpty())) { + for (Entry surlStatus : surlStatusMap.entrySet()) { + listOfSURLsInfo + .add(new SURLData(surlStatus.getKey(), surlStatus.getValue().getStatusCode())); + } + } + return listOfSURLsInfo; + } + + private void printRequestOutcome(TReturnStatus status, ExtendFileLifeTimeInputData inputData) { + + if (inputData != null) { + if (inputData.getArrayOfSURLs() != null) { + if (inputData.getRequestToken() != null) { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, + inputData.getRequestToken(), inputData.getArrayOfSURLs().asStringList()); + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, + inputData.getArrayOfSURLs().asStringList()); + } + + } else { + if (inputData.getRequestToken() != null) { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, + inputData.getRequestToken()); + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); + } + } + + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); + } + } + + private class SURLData { + + public TSURL surl; + public TStatusCode statusCode; + + public SURLData(TSURL surl, TStatusCode statusCode) { + + this.surl = surl; + this.statusCode = statusCode; + } + } } diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/FileTransferRequestStatusCommand.java 
b/src/main/java/it/grid/storm/synchcall/command/datatransfer/FileTransferRequestStatusCommand.java index d51efd3c3..98ab5c891 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/FileTransferRequestStatusCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/FileTransferRequestStatusCommand.java @@ -1,11 +1,18 @@ package it.grid.storm.synchcall.command.datatransfer; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.authz.AuthzException; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; -import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.srm.types.ArrayOfTSURLReturnStatus; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TRequestType; @@ -21,14 +28,6 @@ import it.grid.storm.synchcall.data.datatransfer.ManageFileTransferRequestFilesInputData; import it.grid.storm.synchcall.data.datatransfer.ManageFileTransferRequestInputData; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public abstract class FileTransferRequestStatusCommand extends DataTransferCommand implements Command { diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtPAbortExecutor.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtPAbortExecutor.java index c3710558a..2f77af536 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtPAbortExecutor.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtPAbortExecutor.java @@ -25,9 +25,31 @@ */ package 
it.grid.storm.synchcall.command.datatransfer; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_AUTHORIZATION_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_INTERNAL_ERROR; +import static it.grid.storm.srm.types.TStatusCode.SRM_INVALID_PATH; +import static it.grid.storm.srm.types.TStatusCode.SRM_INVALID_REQUEST; +import static it.grid.storm.srm.types.TStatusCode.SRM_PARTIAL_SUCCESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_QUEUED; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_TIMED_OUT; +import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; + +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + import it.grid.storm.authz.AuthzException; import it.grid.storm.catalogs.PtPChunkCatalog; -import it.grid.storm.catalogs.PtPPersistentChunkData; import it.grid.storm.catalogs.RequestSummaryCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; @@ -35,11 +57,11 @@ import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; +import it.grid.storm.persistence.model.PtPPersistentChunkData; import it.grid.storm.srm.types.ArrayOfSURLs; import 
it.grid.storm.srm.types.ArrayOfTSURLReturnStatus; import it.grid.storm.srm.types.TRequestToken; @@ -59,29 +81,6 @@ import it.grid.storm.synchcall.surl.ExpiredTokenException; import it.grid.storm.synchcall.surl.UnknownTokenException; -import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; -import static it.grid.storm.srm.types.TStatusCode.SRM_AUTHORIZATION_FAILURE; -import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; -import static it.grid.storm.srm.types.TStatusCode.SRM_INTERNAL_ERROR; -import static it.grid.storm.srm.types.TStatusCode.SRM_INVALID_PATH; -import static it.grid.storm.srm.types.TStatusCode.SRM_INVALID_REQUEST; -import static it.grid.storm.srm.types.TStatusCode.SRM_PARTIAL_SUCCESS; -import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; -import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_QUEUED; -import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_TIMED_OUT; -import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE; -import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; - -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.Lists; - public class PtPAbortExecutor implements AbortExecutorInterface { private static final Logger log = LoggerFactory.getLogger(PtPAbortExecutor.class); @@ -89,7 +88,7 @@ public class PtPAbortExecutor implements AbortExecutorInterface { static Configuration config = Configuration.getInstance(); private static int maxLoopTimes = PtPAbortExecutor.config.getMaxLoop(); - private NamespaceInterface namespace; + private Namespace namespace; private final List acceptedStatuses = Lists.newArrayList(SRM_SPACE_AVAILABLE, SRM_REQUEST_QUEUED); @@ -97,7 +96,7 @@ public class PtPAbortExecutor implements AbortExecutorInterface { public AbortGeneralOutputData doIt(AbortInputData inputData) { // Used to delete the 
physical file - namespace = NamespaceDirector.getNamespace(); + namespace = Namespace.getInstance(); AbortGeneralOutputData outputData = new AbortGeneralOutputData(); ArrayOfTSURLReturnStatus arrayOfTSurlRetStatus = new ArrayOfTSURLReturnStatus(); @@ -418,7 +417,7 @@ private TSURLReturnStatus manageAuthorizedAbort(GridUserInterface user, TRequest TSURL surl, TReturnStatus status, AbortInputData inputData) { boolean failure = false; - namespace = NamespaceDirector.getNamespace(); + namespace = Namespace.getInstance(); TSURLReturnStatus surlReturnStatus = new TSURLReturnStatus(); surlReturnStatus.setSurl(surl); @@ -587,8 +586,9 @@ private TSURLReturnStatus manageAuthorizedAbort(GridUserInterface user, TRequest surlReturnStatus .setStatus(new TReturnStatus(SRM_SUCCESS, "File request successfully aborted.")); try { - NamespaceDirector.getNamespace() + Namespace.getInstance() .resolveVFSbyLocalFile(fileToRemove) + .getSpaceUpdater() .decreaseUsedSpace(sizeToRemove); } catch (NamespaceException e) { log.error(e.getMessage()); diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/PutDoneCommand.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/PutDoneCommand.java index b79d2610d..d83380e4d 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/PutDoneCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/PutDoneCommand.java @@ -15,6 +15,30 @@ package it.grid.storm.synchcall.command.datatransfer; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_AUTHORIZATION_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_DUPLICATION_ERROR; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_INTERNAL_ERROR; +import static 
it.grid.storm.srm.types.TStatusCode.SRM_INVALID_PATH; +import static it.grid.storm.srm.types.TStatusCode.SRM_INVALID_REQUEST; +import static it.grid.storm.srm.types.TStatusCode.SRM_PARTIAL_SUCCESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_TIMED_OUT; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; +import static it.grid.storm.synchcall.command.CommandHelper.buildStatus; + +import java.util.Calendar; +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; + import it.grid.storm.authz.AuthzException; import it.grid.storm.catalogs.VolatileAndJiTCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; @@ -25,10 +49,10 @@ import it.grid.storm.griduser.CannotMapUserException; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.LocalUser; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.srm.types.ArrayOfTSURLReturnStatus; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TReturnStatus; @@ -45,30 +69,6 @@ import it.grid.storm.synchcall.surl.ExpiredTokenException; import it.grid.storm.synchcall.surl.UnknownTokenException; -import java.util.Calendar; -import java.util.List; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; -import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; -import static it.grid.storm.srm.types.TStatusCode.SRM_AUTHORIZATION_FAILURE; -import static 
it.grid.storm.srm.types.TStatusCode.SRM_DUPLICATION_ERROR; -import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; -import static it.grid.storm.srm.types.TStatusCode.SRM_INTERNAL_ERROR; -import static it.grid.storm.srm.types.TStatusCode.SRM_INVALID_PATH; -import static it.grid.storm.srm.types.TStatusCode.SRM_INVALID_REQUEST; -import static it.grid.storm.srm.types.TStatusCode.SRM_PARTIAL_SUCCESS; -import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_TIMED_OUT; -import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; -import static it.grid.storm.synchcall.command.CommandHelper.buildStatus; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; - /** * This class is part of the StoRM project. Copyright (c) 2008 INFN-CNAF. */ @@ -102,9 +102,9 @@ private ManageFileTransferRequestFilesInputData inputDataSanityCheck(InputData i } return data; } - - private TReturnStatus buildGlobalStatus(boolean atLeastOneSuccess, - boolean atLeastOneFailure, boolean atLeastOneAborted) { + + private TReturnStatus buildGlobalStatus(boolean atLeastOneSuccess, boolean atLeastOneFailure, + boolean atLeastOneAborted) { if (atLeastOneSuccess) { if (!atLeastOneFailure && !atLeastOneAborted) { @@ -116,7 +116,7 @@ private TReturnStatus buildGlobalStatus(boolean atLeastOneSuccess, if (atLeastOneFailure) { if (!atLeastOneAborted) { return buildStatus(SRM_FAILURE, "All file requests are failed"); - } + } return buildStatus(SRM_FAILURE, "Some file requests are failed, the others are aborted"); } @@ -129,32 +129,33 @@ private TReturnStatus buildGlobalStatus(boolean atLeastOneSuccess, return buildStatus(SRM_INTERNAL_ERROR, "Request Failed, no surl status recognized, retry."); } - private void markSURLsReadyForRead(TRequestToken requestToken, List spaceAvailableSURLs) throws PutDoneCommandException { - + private void markSURLsReadyForRead(TRequestToken requestToken, List spaceAvailableSURLs) + throws PutDoneCommandException { + if 
(spaceAvailableSURLs.isEmpty()) { log.debug("markSURLsReadyForRead: empty spaceAvailableSURLs"); return; } - + SURLStatusManager checker = SURLStatusManagerFactory.newSURLStatusManager(); try { - + checker.markSURLsReadyForRead(requestToken, spaceAvailableSURLs); } catch (IllegalArgumentException e) { - + log.error("PutDone: Unexpected IllegalArgumentException '{}'", e.getMessage()); - throw new PutDoneCommandException(CommandHelper.buildStatus(SRM_INTERNAL_ERROR, "Request Failed, retry."), e); + throw new PutDoneCommandException( + CommandHelper.buildStatus(SRM_INTERNAL_ERROR, "Request Failed, retry."), e); } } - + private ArrayOfTSURLReturnStatus loadSURLsStatuses( - ManageFileTransferRequestFilesInputData inputData) - throws PutDoneCommandException { - + ManageFileTransferRequestFilesInputData inputData) throws PutDoneCommandException { + TRequestToken requestToken = inputData.getRequestToken(); List listOfSURLs = inputData.getArrayOfSURLs().getArrayList(); - + ArrayOfTSURLReturnStatus surlsStatuses = null; try { @@ -167,83 +168,78 @@ private ArrayOfTSURLReturnStatus loadSURLsStatuses( } catch (IllegalArgumentException e) { - log.error("PutDone: Unexpected IllegalArgumentException: {}", - e.getMessage(), e); + log.error("PutDone: Unexpected IllegalArgumentException: {}", e.getMessage(), e); throw new PutDoneCommandException(buildStatus(SRM_INTERNAL_ERROR, "Request Failed, retry.")); } catch (RequestUnknownException e) { - log.info( - "PutDone: Invalid request token and surl. RequestUnknownException: {}", - e.getMessage(), e); + log.info("PutDone: Invalid request token and surl. RequestUnknownException: {}", + e.getMessage(), e); throw new PutDoneCommandException( buildStatus(SRM_INVALID_REQUEST, "Invalid request token and surls")); } catch (UnknownTokenException e) { - log.info("PutDone: Invalid request token. UnknownTokenException: {}", - e.getMessage(), e); + log.info("PutDone: Invalid request token. 
UnknownTokenException: {}", e.getMessage(), e); throw new PutDoneCommandException(buildStatus(SRM_INVALID_REQUEST, "Invalid request token")); } catch (ExpiredTokenException e) { - log.info("PutDone: The request is expired: ExpiredTokenException: {}", - e.getMessage(), e); + log.info("PutDone: The request is expired: ExpiredTokenException: {}", e.getMessage(), e); throw new PutDoneCommandException(buildStatus(SRM_REQUEST_TIMED_OUT, "Request expired")); } return surlsStatuses; } - - + + /** - * Implements the srmPutDone. Used to notify the SRM that the client completed - * a file transfer to the TransferURL in the allocated space (by a - * PrepareToPut). + * Implements the srmPutDone. Used to notify the SRM that the client completed a file transfer to + * the TransferURL in the allocated space (by a PrepareToPut). */ public OutputData execute(InputData absData) { log.debug("PutDone: Started."); - + TReturnStatus globalStatus = null; ArrayOfTSURLReturnStatus surlsStatuses = null; - + boolean atLeastOneSuccess = false; boolean atLeastOneFailure = false; boolean atLeastOneAborted = false; ManageFileTransferRequestFilesInputData inputData = null; try { - + inputData = inputDataSanityCheck(absData); - + } catch (PutDoneCommandException e) { printRequestOutcome(e.getReturnStatus()); return new ManageFileTransferOutputData(e.getReturnStatus()); } - GridUserInterface user = inputData instanceof IdentityInputData - ? ((IdentityInputData) inputData).getUser() : null; + GridUserInterface user = + inputData instanceof IdentityInputData ? 
((IdentityInputData) inputData).getUser() : null; TRequestToken requestToken = inputData.getRequestToken(); List spaceAvailableSURLs = Lists.newArrayList(); - + try { - + surlsStatuses = loadSURLsStatuses(inputData); - + } catch (PutDoneCommandException e) { - + printRequestOutcome(e.getReturnStatus(), inputData); - return new ManageFileTransferOutputData(e.getReturnStatus()); + return new ManageFileTransferOutputData(e.getReturnStatus()); } - - + + for (TSURLReturnStatus surlStatus : surlsStatuses.getArray()) { - + TReturnStatus newStatus; TReturnStatus currentStatus = surlStatus.getStatus(); - + switch (currentStatus.getStatusCode()) { case SRM_SPACE_AVAILABLE: @@ -283,24 +279,23 @@ public OutputData execute(InputData absData) { surlsStatuses.updateStatus(surlStatus, newStatus); } - + try { - + markSURLsReadyForRead(requestToken, spaceAvailableSURLs); } catch (PutDoneCommandException e) { - + printRequestOutcome(e.getReturnStatus(), inputData); - return new ManageFileTransferOutputData(e.getReturnStatus()); + return new ManageFileTransferOutputData(e.getReturnStatus()); } - + log.debug("PutDone: Computing final global status ..."); - globalStatus = buildGlobalStatus(atLeastOneSuccess, atLeastOneFailure, - atLeastOneAborted); - + globalStatus = buildGlobalStatus(atLeastOneSuccess, atLeastOneFailure, atLeastOneAborted); + log.debug("PutDone: Finished with status {}", globalStatus); printRequestOutcome(globalStatus, inputData); - + return new ManageFileTransferOutputData(globalStatus, surlsStatuses); } @@ -310,31 +305,30 @@ private static void printRequestOutcome(TReturnStatus status) { CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); } - private static void printRequestOutcome(TReturnStatus status, ManageFileTransferRequestFilesInputData inputData) { + private static void printRequestOutcome(TReturnStatus status, + ManageFileTransferRequestFilesInputData inputData) { Preconditions.checkNotNull(inputData); Preconditions.checkNotNull(status); 
CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, - inputData.getRequestToken(), inputData.getArrayOfSURLs().asStringList()); + inputData.getRequestToken(), inputData.getArrayOfSURLs().asStringList()); } private ArrayOfTSURLReturnStatus loadSURLsStatus(GridUserInterface user, - TRequestToken requestToken, List inputSURLs) - throws RequestUnknownException { + TRequestToken requestToken, List inputSURLs) throws RequestUnknownException { - ArrayOfTSURLReturnStatus returnStatuses = new ArrayOfTSURLReturnStatus( - inputSURLs.size()); + ArrayOfTSURLReturnStatus returnStatuses = new ArrayOfTSURLReturnStatus(inputSURLs.size()); SURLStatusManager checker = SURLStatusManagerFactory.newSURLStatusManager(); - Map surlsStatuses = checker.getSURLStatuses(user, - requestToken, inputSURLs); + Map surlsStatuses = + checker.getSURLStatuses(user, requestToken, inputSURLs); if (surlsStatuses.isEmpty()) { log.info("PutDone: No one of the requested surls found for the provided token"); throw new RequestUnknownException( - "No one of the requested surls found for the provided token"); + "No one of the requested surls found for the provided token"); } TReturnStatus status = null; @@ -348,7 +342,7 @@ private ArrayOfTSURLReturnStatus loadSURLsStatus(GridUserInterface user, } else { log.debug("PutDone: SURL '{}' NOT found in the DB!", surl); status = new TReturnStatus(SRM_INVALID_PATH, - "SURL does not refer to an existing file for the specified request token"); + "SURL does not refer to an existing file for the specified request token"); } TSURLReturnStatus surlRetStatus = new TSURLReturnStatus(surl, status); returnStatuses.addTSurlReturnStatus(surlRetStatus); @@ -356,100 +350,95 @@ private ArrayOfTSURLReturnStatus loadSURLsStatus(GridUserInterface user, return returnStatuses; } - public static boolean executePutDone(TSURL surl) throws PutDoneCommandException { - return executePutDone(surl, null); + public static boolean executePutDone(TSURL surl) throws 
PutDoneCommandException { + return executePutDone(surl, null); + } + + public static boolean executePutDone(TSURL surl, GridUserInterface user) + throws PutDoneCommandException { + + Preconditions.checkNotNull(surl, "Null SURL received"); + + log.debug("Executing PutDone for SURL: {}", surl.getSURLString()); + + String userStr = user == null ? "Anonymous" : user.toString(); + StoRI stori = null; + + try { + + stori = Namespace.getInstance().resolveStoRIbySURL(surl, user); + + } catch (IllegalArgumentException e) { + + log.error(String.format("User %s is unable to build a stori for surl %s, %s: %s", userStr, + surl, e.getClass().getName(), e.getMessage())); + throw new PutDoneCommandException(buildStatus(SRM_INTERNAL_ERROR, e.getMessage()), e); + + } catch (Exception e) { + + log.info(String.format("User %s is unable to build a stori for surl %s, %s: %s", userStr, + surl, e.getClass().getName(), e.getMessage()), e); + return false; + + } + + // 1- if the SURL is volatile update the entry in the Volatile table + if (VolatileAndJiTCatalog.getInstance().exists(stori.getPFN())) { + try { + VolatileAndJiTCatalog.getInstance().setStartTime(stori.getPFN(), Calendar.getInstance()); + } catch (Exception e) { + // impossible because of the "exists" check + } + } + + // 2- JiTs must me removed from the TURL + if (stori.hasJustInTimeACLs()) { + log.debug("PutDone: JiT case, removing ACEs on SURL: " + surl.toString()); + // Retrieve the PFN of the SURL parents + List storiParentsList = stori.getParents(); + List pfnParentsList = Lists.newArrayList(); + + for (StoRI parentStoRI : storiParentsList) { + pfnParentsList.add(parentStoRI.getPFN()); + } + LocalUser localUser = null; + try { + if (user != null) { + localUser = user.getLocalUser(); + } + } catch (CannotMapUserException e) { + log.warn("PutDone: Unable to get the local user for user {}. 
CannotMapUserException: {}", + user, e.getMessage(), e); + } + if (localUser != null) { + VolatileAndJiTCatalog.getInstance().expirePutJiTs(stori.getPFN(), localUser); + } else { + VolatileAndJiTCatalog.getInstance().removeAllJiTsOn(stori.getPFN()); + } + } + + // 3- compute the checksum and store it in an extended attribute + LocalFile localFile = stori.getLocalFile(); + + VirtualFS vfs = null; + try { + vfs = Namespace.getInstance().resolveVFSbyLocalFile(localFile); + } catch (NamespaceException e) { + log.error(e.getMessage(), e); + return false; } - public static boolean executePutDone(TSURL surl, GridUserInterface user) - throws PutDoneCommandException { - - Preconditions.checkNotNull(surl, "Null SURL received"); - - log.debug("Executing PutDone for SURL: {}", surl.getSURLString()); - - String userStr = user == null ? "Anonymous" : user.toString(); - StoRI stori = null; - - try { - - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl, user); - - } catch (IllegalArgumentException e) { - - log.error( - String.format("User %s is unable to build a stori for surl %s, %s: %s", - userStr, surl, e.getClass().getName(), e.getMessage())); - throw new PutDoneCommandException(buildStatus(SRM_INTERNAL_ERROR, e.getMessage()), e); - - } catch (Exception e) { - - log.info( - String.format("User %s is unable to build a stori for surl %s, %s: %s", - userStr, surl, e.getClass().getName(), e.getMessage()), e); - return false; - - } - - // 1- if the SURL is volatile update the entry in the Volatile table - if (VolatileAndJiTCatalog.getInstance().exists(stori.getPFN())) { - try { - VolatileAndJiTCatalog.getInstance().setStartTime(stori.getPFN(), - Calendar.getInstance()); - } catch (Exception e) { - // impossible because of the "exists" check - } - } - - // 2- JiTs must me removed from the TURL - if (stori.hasJustInTimeACLs()) { - log.debug("PutDone: JiT case, removing ACEs on SURL: " + surl.toString()); - // Retrieve the PFN of the SURL parents - List 
storiParentsList = stori.getParents(); - List pfnParentsList = Lists.newArrayList(); - - for (StoRI parentStoRI : storiParentsList) { - pfnParentsList.add(parentStoRI.getPFN()); - } - LocalUser localUser = null; - try { - if (user != null) { - localUser = user.getLocalUser(); - } - } catch (CannotMapUserException e) { - log.warn( - "PutDone: Unable to get the local user for user {}. CannotMapUserException: {}", - user, e.getMessage(), e); - } - if (localUser != null) { - VolatileAndJiTCatalog.getInstance().expirePutJiTs(stori.getPFN(), - localUser); - } else { - VolatileAndJiTCatalog.getInstance().removeAllJiTsOn(stori.getPFN()); - } - } - - // 3- compute the checksum and store it in an extended attribute - LocalFile localFile = stori.getLocalFile(); - - VirtualFSInterface vfs = null; - try { - vfs = NamespaceDirector.getNamespace().resolveVFSbyLocalFile(localFile); - } catch (NamespaceException e) { - log.error(e.getMessage(), e); - return false; - } - - // 4- Tape stuff management. - if (vfs.getStorageClassType().isTapeEnabled()) { - String fileAbosolutePath = localFile.getAbsolutePath(); - StormEA.removePinned(fileAbosolutePath); - StormEA.setPremigrate(fileAbosolutePath); - } - - // 5- Update UsedSpace into DB - vfs.increaseUsedSpace(localFile.getSize()); - - return true; - } + // 4- Tape stuff management. 
+ if (vfs.getStorageClassType().isTapeEnabled()) { + String fileAbosolutePath = localFile.getAbsolutePath(); + StormEA.removePinned(fileAbosolutePath); + StormEA.setPremigrate(fileAbosolutePath); + } + + // 5- Update UsedSpace into DB + vfs.getSpaceUpdater().increaseUsedSpace(localFile.getSize()); + + return true; + } } diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/ReleaseFilesCommand.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/ReleaseFilesCommand.java index caf3184ab..738169333 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/ReleaseFilesCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/ReleaseFilesCommand.java @@ -17,12 +17,22 @@ package it.grid.storm.synchcall.command.datatransfer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.EnumSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.authz.AuthzException; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; import it.grid.storm.ea.StormEA; import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.StoRI; import it.grid.storm.srm.types.ArrayOfTSURLReturnStatus; import it.grid.storm.srm.types.TRequestToken; @@ -41,16 +51,6 @@ import it.grid.storm.synchcall.data.datatransfer.ManageFileTransferRequestFilesInputData; import it.grid.storm.synchcall.data.datatransfer.ManageFileTransferRequestInputData; -import java.util.ArrayList; -import java.util.Collection; -import java.util.EnumSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * * This class is part of the StoRM project. Copyright (c) 2008 INFN-CNAF. 
@@ -382,7 +382,7 @@ private void removePinneExtendedAttribute(List surlToRelease) { try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl); + stori = Namespace.getInstance().resolveStoRIbySURL(surl); } catch (Throwable e) { diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/RequestUnknownException.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/RequestUnknownException.java index 0850ecf93..5d3671972 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/RequestUnknownException.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/RequestUnknownException.java @@ -15,15 +15,4 @@ public RequestUnknownException(String message) { super(message); } - - public RequestUnknownException(Throwable cause) { - - super(cause); - } - - public RequestUnknownException(String message, Throwable cause) { - - super(message, cause); - } - } diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/LsCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/LsCommand.java index 8a56cd21c..c37599d7a 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/LsCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/LsCommand.java @@ -43,9 +43,8 @@ import it.grid.storm.namespace.InvalidDescendantsFileRequestException; import it.grid.storm.namespace.InvalidDescendantsPathRequestException; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; import it.grid.storm.srm.types.ArrayOfSURLs; @@ -82,28 +81,19 @@ import it.grid.storm.synchcall.data.directory.LSInputData; import it.grid.storm.synchcall.data.directory.LSOutputData; -/** - * This class is part of the StoRM 
project. Copyright (c) 2008 INFN-CNAF. - *

- * Authors: - * - * @author lucamag luca.magnoniATcnaf.infn.it - * @date = Dec 3, 2008 - */ - public class LsCommand extends DirectoryCommand implements Command { public static final Logger log = LoggerFactory.getLogger(LsCommand.class); private static final String SRM_COMMAND = "srmLs"; - private final NamespaceInterface namespace; + private final Namespace namespace; private boolean atLeastOneInputSURLIsDir; public LsCommand() { - namespace = NamespaceDirector.getNamespace(); + namespace = Namespace.getInstance(); } /** @@ -154,14 +144,14 @@ public OutputData execute(InputData data) { boolean allLevelRecursive; if (inputData.getAllLevelRecursive() == null) { - allLevelRecursive = DirectoryCommand.config.getLSallLevelRecursive(); + allLevelRecursive = DirectoryCommand.config.isLsDefaultAllLevelRecursive(); } else { allLevelRecursive = inputData.getAllLevelRecursive().booleanValue(); } int numOfLevels; if (inputData.getNumOfLevels() == null) { - numOfLevels = DirectoryCommand.config.getLSnumOfLevels(); + numOfLevels = DirectoryCommand.config.getLsDefaultNumOfLevels(); } else { numOfLevels = inputData.getNumOfLevels().intValue(); if (numOfLevels < 0) { @@ -178,7 +168,7 @@ public OutputData execute(InputData data) { if (inputData.getCount() == null) { // Set to max entries value. Plus one in order to be able to return // TOO_MANY_RESULTS. - count = DirectoryCommand.config.getLSMaxNumberOfEntry() + 1; + count = DirectoryCommand.config.getLsMaxNumberOfEntry() + 1; } else { count = inputData.getCount().intValue(); if (count < 0) { @@ -189,7 +179,7 @@ public OutputData execute(InputData data) { return outputData; } if (count == 0) { - count = DirectoryCommand.config.getLSMaxNumberOfEntry() + 1; + count = DirectoryCommand.config.getLsMaxNumberOfEntry() + 1; } coutOrOffsetAreSpecified = true; } @@ -197,7 +187,7 @@ public OutputData execute(InputData data) { int offset; if (inputData.getOffset() == null) { // Set to the default value. 
- offset = DirectoryCommand.config.getLSoffset(); + offset = DirectoryCommand.config.getLsDefaultOffset(); } else { offset = inputData.getOffset().intValue(); if (offset < 0) { @@ -215,7 +205,7 @@ public OutputData execute(InputData data) { String fileLevelExplanation = ""; int errorCount = 0; - int maxEntries = DirectoryCommand.config.getLSMaxNumberOfEntry(); + int maxEntries = DirectoryCommand.config.getLsMaxNumberOfEntry(); if (count < maxEntries) { maxEntries = count; @@ -383,7 +373,7 @@ public OutputData execute(InputData data) { if (numberOfReturnedEntries.intValue() >= maxEntries) { if (maxEntries < count) { globalStatus = CommandHelper.buildStatus(TStatusCode.SRM_TOO_MANY_RESULTS, - "Max returned entries is: " + DirectoryCommand.config.getLSMaxNumberOfEntry()); + "Max returned entries is: " + DirectoryCommand.config.getLsMaxNumberOfEntry()); printRequestOutcome(globalStatus, inputData); outputData.setStatus(globalStatus); return outputData; diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/MkdirCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/MkdirCommand.java index c45168944..3c55ce075 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/MkdirCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/MkdirCommand.java @@ -17,13 +17,29 @@ package it.grid.storm.synchcall.command.directory; +import static it.grid.storm.filesystem.FilesystemPermission.ListTraverse; +import static it.grid.storm.filesystem.FilesystemPermission.ListTraverseWrite; +import static it.grid.storm.srm.types.TStatusCode.SRM_AUTHORIZATION_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_DUPLICATION_ERROR; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_INTERNAL_ERROR; +import static it.grid.storm.srm.types.TStatusCode.SRM_INVALID_PATH; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; +import static 
it.grid.storm.synchcall.command.directory.MkdirException.srmAuthorizationFailure; +import static it.grid.storm.synchcall.command.directory.MkdirException.srmFailure; +import static it.grid.storm.synchcall.command.directory.MkdirException.srmInternalError; +import static it.grid.storm.synchcall.command.directory.MkdirException.srmInvalidPath; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + import it.grid.storm.acl.AclManager; import it.grid.storm.acl.AclManagerFS; import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; -import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; import it.grid.storm.config.Configuration; import it.grid.storm.filesystem.FilesystemPermission; import it.grid.storm.filesystem.LocalFile; @@ -31,18 +47,16 @@ import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.LocalUser; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; -import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.model.ACLEntry; import it.grid.storm.namespace.model.DefaultACL; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.srm.types.SRMCommandException; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.Command; import it.grid.storm.synchcall.command.CommandHelper; @@ -53,25 +67,6 @@ import it.grid.storm.synchcall.data.directory.MkdirInputData; import 
it.grid.storm.synchcall.data.directory.MkdirOutputData; -import static it.grid.storm.filesystem.FilesystemPermission.ListTraverse; -import static it.grid.storm.filesystem.FilesystemPermission.ListTraverseWrite; -import static it.grid.storm.srm.types.TStatusCode.SRM_AUTHORIZATION_FAILURE; -import static it.grid.storm.srm.types.TStatusCode.SRM_DUPLICATION_ERROR; -import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; -import static it.grid.storm.srm.types.TStatusCode.SRM_INTERNAL_ERROR; -import static it.grid.storm.srm.types.TStatusCode.SRM_INVALID_PATH; -import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; -import static it.grid.storm.synchcall.command.directory.MkdirException.srmAuthorizationFailure; -import static it.grid.storm.synchcall.command.directory.MkdirException.srmFailure; -import static it.grid.storm.synchcall.command.directory.MkdirException.srmInternalError; -import static it.grid.storm.synchcall.command.directory.MkdirException.srmInvalidPath; -import static java.lang.String.format; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.Lists; - class MkdirException extends SRMCommandException { private static final long serialVersionUID = 1L; @@ -85,10 +80,6 @@ public static MkdirException srmInvalidPath(String message) { return new MkdirException(SRM_INVALID_PATH, message); } - public static MkdirException srmDuplicationError(String message) { - return new MkdirException(SRM_DUPLICATION_ERROR, message); - } - public static MkdirException srmInternalError(String message) { return new MkdirException(SRM_INTERNAL_ERROR, message); } @@ -117,13 +108,13 @@ public class MkdirCommand extends DirectoryCommand implements Command { private static final String SRM_COMMAND = "SrmMkdir"; - private final NamespaceInterface namespace; + private final Namespace namespace; private final Configuration configuration; private final AclManager aclManager; public MkdirCommand() { - namespace = 
NamespaceDirector.getNamespace(); + namespace = Namespace.getInstance(); configuration = Configuration.getInstance(); aclManager = AclManagerFS.getInstance(); } @@ -237,22 +228,6 @@ private boolean isAnonymous(GridUserInterface user) { private void checkUserAuthorization(StoRI stori, GridUserInterface user) throws MkdirException { - TSpaceToken token = stori.getVirtualFileSystem().getSpaceToken(); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - boolean isSpaceAuthorized; - if (isAnonymous(user)) { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.MD); - } else { - isSpaceAuthorized = spaceAuth.authorize(user, SRMSpaceRequest.MD); - } - if (!isSpaceAuthorized) { - String msg = - format("User not authorized to perform srmMkdir request on the storage area: %s", token); - log.debug("srmMkdir:{}", msg); - throw srmAuthorizationFailure(msg); - } - AuthzDecision decision; if (isAnonymous(user)) { decision = @@ -283,7 +258,7 @@ private GridUserInterface getUser(InputData data) { private boolean increaseUsedSpaceInfo(LocalFile dir) { try { - return namespace.resolveVFSbyLocalFile(dir).increaseUsedSpace(dir.getSize()); + return namespace.resolveVFSbyLocalFile(dir).getSpaceUpdater().increaseUsedSpace(dir.getSize()); } catch (NamespaceException e) { log.error("srmMkdir: Unable to increase used space info [{}]", e.getMessage()); return false; @@ -293,16 +268,13 @@ private boolean increaseUsedSpaceInfo(LocalFile dir) { private void manageAcl(StoRI stori, GridUserInterface user) { FilesystemPermission permission = - configuration.getEnableWritePermOnDirectory() ? ListTraverseWrite : ListTraverse; + configuration.isDirectoryWritePermOnCreationEnabled() ? 
ListTraverseWrite : ListTraverse; try { - if (isAnonymous(user)) { - manageDefaultACL(stori.getLocalFile(), permission); - setHttpsServiceAcl(stori.getLocalFile(), permission); - } else { + if (!isAnonymous(user)) { setAcl(user, stori.getLocalFile(), stori.hasJustInTimeACLs(), permission); - manageDefaultACL(stori.getLocalFile(), permission); } + manageDefaultACL(stori.getLocalFile(), permission); } catch (NamespaceException | CannotMapUserException e) { log.error("srmMkdir: Unable to set ACL [{}]", e.getMessage()); } @@ -335,7 +307,7 @@ private void setAcl(GridUserInterface user, LocalFile file, boolean hasJiTACL, private void manageDefaultACL(LocalFile dir, FilesystemPermission permission) throws NamespaceException { - VirtualFSInterface vfs = namespace.resolveVFSbyLocalFile(dir); + VirtualFS vfs = namespace.resolveVFSbyLocalFile(dir); DefaultACL dacl = vfs.getCapabilities().getDefaultACL(); if ((dacl == null) || (dacl.isEmpty())) { log.debug("srmMkdir: default acl NULL or empty"); @@ -350,12 +322,6 @@ private void manageDefaultACL(LocalFile dir, FilesystemPermission permission) } } - private void setHttpsServiceAcl(LocalFile file, FilesystemPermission permission) { - - log.debug("SrmMkdir: Adding default ACL for directory {}: {}", file, permission); - aclManager.grantHttpsServiceGroupPermission(file, permission); - } - private void printRequestOutcome(TReturnStatus status, MkdirInputData inputData) { if (inputData != null) { diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/MvCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/MvCommand.java index 3197b2786..1623c4baa 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/MvCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/MvCommand.java @@ -17,29 +17,25 @@ package it.grid.storm.synchcall.command.directory; -import it.grid.storm.acl.AclManagerFS; +import java.util.Arrays; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + 
import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; -import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; -import it.grid.storm.filesystem.FilesystemPermission; import it.grid.storm.filesystem.LocalFile; -import it.grid.storm.griduser.CannotMapUserException; -import it.grid.storm.griduser.LocalUser; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; -import it.grid.storm.space.SpaceHelper; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.Command; import it.grid.storm.synchcall.command.CommandHelper; @@ -51,15 +47,9 @@ import it.grid.storm.synchcall.data.directory.MvInputData; import it.grid.storm.synchcall.data.directory.MvOutputData; -import java.util.Arrays; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project This class implements the SrmMv - * Command. + * This class is part of the StoRM project. Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project This class implements the SrmMv Command. 
* * @author lucamag * @date May 28, 2008 @@ -69,563 +59,422 @@ public class MvCommand extends DirectoryCommand implements Command { public static final Logger log = LoggerFactory.getLogger(MvCommand.class); - private static final String SRM_COMMAND = "SrmMv"; - private final NamespaceInterface namespace; - - public MvCommand() { - - namespace = NamespaceDirector.getNamespace(); - - } - - /** - * Method that provide SrmMv functionality. - * - * @param inputData - * Contains information about input data for Mv request. - * @return outputData Contains output data - */ - public OutputData execute(InputData data) { - - log.debug("srmMv: Start execution."); - MvOutputData outputData = new MvOutputData(); - MvInputData inputData = (MvInputData) data; - - /** - * Validate MvInputData. The check is done at this level to separate - * internal StoRM logic from xmlrpc specific operation. - */ - - if ((inputData == null) || (inputData.getFromSURL() == null) - || (inputData.getToSURL() == null)) { - outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_FAILURE, - "Invalid parameter specified.")); - log.warn("srmMv: Request failed with [status: {}]", - outputData.getStatus()); - - return outputData; - } - - TSURL fromSURL = inputData.getFromSURL(); - - if (fromSURL.isEmpty()) { - log.warn("srmMv: unable to perform the operation, empty fromSurl"); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, "Invalid fromSURL specified!")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - TSURL toSURL = inputData.getToSURL(); - - if (toSURL.isEmpty()) { - log.error("srmMv: unable to perform the operation, empty toSurl"); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, "Invalid toSURL specified!")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - StoRI fromStori = null; - try { - if (inputData instanceof IdentityInputData) { - try { - fromStori 
= namespace.resolveStoRIbySURL(fromSURL, - ((IdentityInputData) inputData).getUser()); - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - } - } else { - try { - fromStori = namespace.resolveStoRIbySURL(fromSURL); - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {}. {}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {}. {}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {}. 
{}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } - } catch (IllegalArgumentException e) { - log.warn("srmMv: Unable to build StoRI by SURL: {}. {}", fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_REQUEST, "Unable to build StoRI by SURL")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - StoRI toStori = null;; - try { - if (inputData instanceof IdentityInputData) { - try { - toStori = namespace.resolveStoRIbySURL(toSURL, - ((IdentityInputData) inputData).getUser()); - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } else { - try { - toStori = namespace.resolveStoRIbySURL(toSURL); - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {}. 
{}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {}. {}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {}. {}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } - } catch (IllegalArgumentException e) { - log.error("srmMv: Unable to build StoRI by SURL: {}. {}", fromSURL - ,e.getMessage(),e); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, - "Unable to build StoRI by destination SURL")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - TSpaceToken token = new SpaceHelper().getTokenFromStoRI(log, fromStori); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - boolean isSpaceAuthorized; - if (inputData instanceof IdentityInputData) { - isSpaceAuthorized = spaceAuth.authorize( - ((IdentityInputData) inputData).getUser(), SRMSpaceRequest.MV); - } else { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.MV); - } - if (!isSpaceAuthorized) { - log.debug("srmMv: User not authorized to perform srmMv on SA: {}", token); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, - ": User not authorized to perform srmMv on SA: " + token)); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - if (fromStori.getLocalFile().getPath() - 
.compareTo(toStori.getLocalFile().getPath()) == 0) { - outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_SUCCESS, - "Source SURL and target SURL are the same file.")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - if (toStori.getLocalFile().exists()) { - if (toStori.getLocalFile().isDirectory()) { - try { - toStori = buildDestinationStoryForFolder(toSURL, fromStori, data); - } catch (IllegalArgumentException e) { - log.debug("srmMv : Unable to build StoRI for SURL {}. {}", - toSURL, e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, "Unable to build StoRI by SURL")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - toSURL, DataHelper.getRequestor(inputData),e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidTSURLAttributesException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - toSURL, DataHelper.getRequestor(inputData),e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, "Invalid toSURL specified!")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - toSURL, DataHelper.getRequestor(inputData),e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. 
{}", - toSURL, DataHelper.getRequestor(inputData),e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } else { - log.debug("srmMv : destination SURL {} already exists.", toSURL); - outputData.setStatus(CommandHelper - .buildStatus(TStatusCode.SRM_DUPLICATION_ERROR, - "destination SURL already exists!")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } - - AuthzDecision sourceDecision; - if (inputData instanceof IdentityInputData) { - sourceDecision = AuthzDirector.getPathAuthz().authorize( - ((IdentityInputData) inputData).getUser(), SRMFileRequest.MV_source, - fromStori, toStori); - } else { - sourceDecision = AuthzDirector.getPathAuthz().authorizeAnonymous( - SRMFileRequest.MV_source, fromStori, toStori); - } - AuthzDecision destinationDecision; - if (inputData instanceof IdentityInputData) { - destinationDecision = AuthzDirector.getPathAuthz().authorize( - ((IdentityInputData) inputData).getUser(), SRMFileRequest.MV_dest, - fromStori, toStori); - } else { - destinationDecision = AuthzDirector.getPathAuthz().authorizeAnonymous( - SRMFileRequest.MV_dest, fromStori, toStori); - } - TReturnStatus returnStatus; - if ((sourceDecision.equals(AuthzDecision.PERMIT)) - && (destinationDecision.equals(AuthzDecision.PERMIT))) { - - log.debug("SrmMv: Mv authorized for user {}. Source: {}. 
Target: {}", - DataHelper.getRequestor(inputData), - fromStori.getPFN(), - toStori.getPFN()); - - returnStatus = manageAuthorizedMV(fromStori, toStori.getLocalFile()); - if (returnStatus.isSRM_SUCCESS()) { - LocalUser user = null; - if (inputData instanceof IdentityInputData) { - try { - user = ((IdentityInputData) inputData).getUser().getLocalUser(); - } catch (CannotMapUserException e) { - log - .warn("srmMv: user mapping error {}", e.getMessage()); - - if (log.isDebugEnabled()){ - log.error(e.getMessage(),e); - } - - returnStatus - .extendExplaination("unable to set user acls on the destination file"); - } - } - if (user != null) { - setAcl(fromStori, toStori, user); - } else { - setAcl(fromStori, toStori); - } - } else { - log.warn("srmMv: <{}> Request for [fromSURL={}; toSURL={}] failed with [status: {}]", - DataHelper.getRequestor(inputData), - fromSURL, - toSURL, - returnStatus); - } - } else { - - String errorMsg = "Authorization error"; - - if (sourceDecision.equals(AuthzDecision.PERMIT)) { - errorMsg = - "User is not authorized to create and/or write the destination file"; - } else { - if (destinationDecision.equals(AuthzDecision.PERMIT)) { - errorMsg = - "User is not authorized to read and/or delete the source file"; - } else { - errorMsg = - "User is neither authorized to read and/or delete the source file " - + "nor to create and/or write the destination file"; - } - } - - returnStatus = - CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, - errorMsg); - } - outputData.setStatus(returnStatus); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - private StoRI buildDestinationStoryForFolder(TSURL toSURL, StoRI fromStori, - InputData inputData) throws IllegalArgumentException, - InvalidTSURLAttributesException, UnapprochableSurlException, - NamespaceException, InvalidSURLException { - - StoRI toStori; - String toSURLString = toSURL.getSURLString(); - if (!(toSURLString.endsWith("/"))) { - toSURLString += 
"/"; - } - toSURLString += fromStori.getFilename(); - log.debug("srmMv: New toSURL: {}", toSURLString); - if (inputData instanceof IdentityInputData) { - toStori = namespace.resolveStoRIbySURL( - TSURL.makeFromStringValidate(toSURLString), - ((IdentityInputData) inputData).getUser()); - } else { - toStori = namespace.resolveStoRIbySURL(TSURL - .makeFromStringValidate(toSURLString)); - } - return toStori; - } - - private void setAcl(StoRI oldFileStoRI, StoRI newFileStoRI) { - - try { - AclManagerFS.getInstance().moveHttpsPermissions( - oldFileStoRI.getLocalFile(), newFileStoRI.getLocalFile()); - } catch (IllegalArgumentException e) { - log - .error("Unable to move permissions from the old to the new file.{}", - e.getMessage(), e); - } - } - - private void setAcl(StoRI oldFileStoRI, StoRI newFileStoRI, - LocalUser localUser) { - - setAcl(oldFileStoRI, newFileStoRI); - if (newFileStoRI.hasJustInTimeACLs()) { - // JiT - try { - AclManagerFS.getInstance().grantHttpsUserPermission( - newFileStoRI.getLocalFile(), localUser, - FilesystemPermission.ReadWrite); - } catch (IllegalArgumentException e) { - log - .error("Unable to grant user read and write permission on file. {}", - e.getMessage(), - e); - } - } else { - // AoT - try { - AclManagerFS.getInstance().grantHttpsGroupPermission( - newFileStoRI.getLocalFile(), localUser, - FilesystemPermission.ReadWrite); - } catch (IllegalArgumentException e) { - log - .error("Unable to grant group read and write permission on file. {}" - ,e.getMessage(),e); - } - } - } - - /** - * Split PFN , recursive creation is not supported, as reported at page 16 of - * Srm v2.1 spec. 
- * - * @param user - * VomsGridUser - * @param LocalFile - * fromFile - * @param LocalFile - * toFile - * @return TReturnStatus - */ - private TReturnStatus manageAuthorizedMV(StoRI fromStori, LocalFile toFile) { - - boolean creationDone; - - String explanation = ""; - TStatusCode statusCode = TStatusCode.EMPTY; - - LocalFile fromFile = fromStori.getLocalFile(); - LocalFile toParent = toFile.getParentFile(); - - /* - * Controllare che File sorgente esiste Esiste directory destinazione(che - * esista e sia directory) Non esiste file deestinazione - */ - - boolean sourceExists = false; - boolean targetDirExists = false; - boolean targetFileExists = false; - - if (fromFile != null) { - sourceExists = fromFile.exists(); - } - - if (toParent != null) { - targetDirExists = toParent.exists() && toParent.isDirectory(); - } - - if (toFile != null) { - targetFileExists = toFile.exists(); - } - - if (sourceExists && targetDirExists && !targetFileExists) { - - SURLStatusManager checker = SURLStatusManagerFactory - .newSURLStatusManager(); - - if(checker.isSURLBusy(fromStori.getSURL())){ - log - .debug("srmMv request failure: fromSURL is busy."); - explanation = "There is an active SrmPrepareToPut on from SURL."; - return CommandHelper - .buildStatus(TStatusCode.SRM_FILE_BUSY, explanation); - } - - /** - * Check if there is an active SrmPrepareToGet on the source SURL. In that - * case SrmMv() fails with SRM_FILE_BUSY. - */ - - if (checker.isSURLPinned(fromStori.getSURL())){ - log - .debug("SrmMv: requests fails because the source SURL is being used from other requests."); - explanation = "There is an active SrmPrepareToGet on from SURL"; - return CommandHelper - .buildStatus(TStatusCode.SRM_FILE_BUSY, explanation); - } - - /** - * Perform the SrmMv() operation. 
- */ - creationDone = fromFile.renameTo(toFile.getPath()); - - if (creationDone) { - log.debug("SrmMv: Request success!"); - explanation = "SURL moved with success"; - statusCode = TStatusCode.SRM_SUCCESS; - } else { - log.debug("SrmMv: Requests fails because the path is invalid."); - explanation = "Invalid path"; - statusCode = TStatusCode.SRM_INVALID_PATH; - } - - } else { - if (!sourceExists) { // and it is a file - log - .debug("SrmMv: request fails because the source SURL does not exists!"); - explanation = "Source SURL does not exists!"; - statusCode = TStatusCode.SRM_INVALID_PATH; - } else { - if (!targetDirExists) { - log - .debug("SrmMv: request fails because the target directory does not exitts."); - explanation = "Target directory does not exits!"; - statusCode = TStatusCode.SRM_INVALID_PATH; - } else { - if (targetFileExists) { - log.debug("SrmMv: request fails because the target SURL exists."); - explanation = "Target SURL exists!"; - statusCode = TStatusCode.SRM_DUPLICATION_ERROR; - } else { - log.debug("SrmMv request failure! 
That is a BUG!"); - explanation = "That is a bug!"; - statusCode = TStatusCode.SRM_INTERNAL_ERROR; - } - } - } - } - - return CommandHelper.buildStatus(statusCode, explanation); - } - - private void printRequestOutcome(TReturnStatus status, MvInputData inputData) { - - if (inputData != null) { - if (inputData.getFromSURL() != null && inputData.getToSURL() != null) { - CommandHelper.printRequestOutcome( - SRM_COMMAND, - log, - status, - inputData, - Arrays.asList(new String[] { inputData.getFromSURL().toString(), - inputData.getFromSURL().toString() })); - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); - } - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); - } - } + private static final String SRM_COMMAND = "SrmMv"; + private final Namespace namespace; + + public MvCommand() { + + namespace = Namespace.getInstance(); + + } + + /** + * Method that provide SrmMv functionality. + * + * @param inputData Contains information about input data for Mv request. + * @return outputData Contains output data + */ + public OutputData execute(InputData data) { + + log.debug("srmMv: Start execution."); + MvOutputData outputData = new MvOutputData(); + MvInputData inputData = (MvInputData) data; + + /** + * Validate MvInputData. The check is done at this level to separate internal StoRM logic from + * xmlrpc specific operation. 
+ */ + + if ((inputData == null) || (inputData.getFromSURL() == null) + || (inputData.getToSURL() == null)) { + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_FAILURE, "Invalid parameter specified.")); + log.warn("srmMv: Request failed with [status: {}]", outputData.getStatus()); + + return outputData; + } + + TSURL fromSURL = inputData.getFromSURL(); + + if (fromSURL.isEmpty()) { + log.warn("srmMv: unable to perform the operation, empty fromSurl"); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, "Invalid fromSURL specified!")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + TSURL toSURL = inputData.getToSURL(); + + if (toSURL.isEmpty()) { + log.error("srmMv: unable to perform the operation, empty toSurl"); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, "Invalid toSURL specified!")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + StoRI fromStori = null; + try { + if (inputData instanceof IdentityInputData) { + try { + fromStori = + namespace.resolveStoRIbySURL(fromSURL, ((IdentityInputData) inputData).getUser()); + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. 
{}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + } + } else { + try { + fromStori = namespace.resolveStoRIbySURL(fromSURL); + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } + } catch (IllegalArgumentException e) { + log.warn("srmMv: Unable to build StoRI by SURL: {}. 
{}", fromSURL, e.getMessage()); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_REQUEST, + "Unable to build StoRI by SURL")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + StoRI toStori = null;; + try { + if (inputData instanceof IdentityInputData) { + try { + toStori = namespace.resolveStoRIbySURL(toSURL, ((IdentityInputData) inputData).getUser()); + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } else { + try { + toStori = namespace.resolveStoRIbySURL(toSURL); + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {}. 
{}", fromSURL, e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } + } catch (IllegalArgumentException e) { + log.error("srmMv: Unable to build StoRI by SURL: {}. {}", fromSURL, e.getMessage(), e); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, + "Unable to build StoRI by destination SURL")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + if (fromStori.getLocalFile().getPath().compareTo(toStori.getLocalFile().getPath()) == 0) { + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_SUCCESS, + "Source SURL and target SURL are the same file.")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + if (toStori.getLocalFile().exists()) { + if (toStori.getLocalFile().isDirectory()) { + try { + toStori = buildDestinationStoryForFolder(toSURL, fromStori, data); + } catch (IllegalArgumentException e) { + log.debug("srmMv : Unable to build StoRI for SURL {}. {}", toSURL, e.getMessage()); + + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, + "Unable to build StoRI by SURL")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. 
{}", toSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidTSURLAttributesException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", toSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, "Invalid toSURL specified!")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", toSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. 
{}", toSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } else { + log.debug("srmMv : destination SURL {} already exists.", toSURL); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_DUPLICATION_ERROR, + "destination SURL already exists!")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } + + AuthzDecision sourceDecision; + if (inputData instanceof IdentityInputData) { + sourceDecision = AuthzDirector.getPathAuthz() + .authorize(((IdentityInputData) inputData).getUser(), SRMFileRequest.MV_source, fromStori, + toStori); + } else { + sourceDecision = AuthzDirector.getPathAuthz() + .authorizeAnonymous(SRMFileRequest.MV_source, fromStori, toStori); + } + AuthzDecision destinationDecision; + if (inputData instanceof IdentityInputData) { + destinationDecision = AuthzDirector.getPathAuthz() + .authorize(((IdentityInputData) inputData).getUser(), SRMFileRequest.MV_dest, fromStori, + toStori); + } else { + destinationDecision = AuthzDirector.getPathAuthz() + .authorizeAnonymous(SRMFileRequest.MV_dest, fromStori, toStori); + } + TReturnStatus returnStatus; + if ((sourceDecision.equals(AuthzDecision.PERMIT)) + && (destinationDecision.equals(AuthzDecision.PERMIT))) { + + log.debug("SrmMv: Mv authorized for user {}. Source: {}. 
Target: {}", + DataHelper.getRequestor(inputData), fromStori.getPFN(), toStori.getPFN()); + + returnStatus = manageAuthorizedMV(fromStori, toStori.getLocalFile()); + + } else { + + String errorMsg = "Authorization error"; + + if (sourceDecision.equals(AuthzDecision.PERMIT)) { + errorMsg = "User is not authorized to create and/or write the destination file"; + } else { + if (destinationDecision.equals(AuthzDecision.PERMIT)) { + errorMsg = "User is not authorized to read and/or delete the source file"; + } else { + errorMsg = "User is neither authorized to read and/or delete the source file " + + "nor to create and/or write the destination file"; + } + } + + returnStatus = CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, errorMsg); + } + outputData.setStatus(returnStatus); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + private StoRI buildDestinationStoryForFolder(TSURL toSURL, StoRI fromStori, InputData inputData) + throws IllegalArgumentException, InvalidTSURLAttributesException, UnapprochableSurlException, + NamespaceException, InvalidSURLException { + + StoRI toStori; + String toSURLString = toSURL.getSURLString(); + if (!(toSURLString.endsWith("/"))) { + toSURLString += "/"; + } + toSURLString += fromStori.getFilename(); + log.debug("srmMv: New toSURL: {}", toSURLString); + if (inputData instanceof IdentityInputData) { + toStori = namespace.resolveStoRIbySURL(TSURL.makeFromStringValidate(toSURLString), + ((IdentityInputData) inputData).getUser()); + } else { + toStori = namespace.resolveStoRIbySURL(TSURL.makeFromStringValidate(toSURLString)); + } + return toStori; + } + + /** + * Split PFN , recursive creation is not supported, as reported at page 16 of Srm v2.1 spec. 
+ * + * @param user VomsGridUser + * @param LocalFile fromFile + * @param LocalFile toFile + * @return TReturnStatus + */ + private TReturnStatus manageAuthorizedMV(StoRI fromStori, LocalFile toFile) { + + boolean creationDone; + + String explanation = ""; + TStatusCode statusCode = TStatusCode.EMPTY; + + LocalFile fromFile = fromStori.getLocalFile(); + LocalFile toParent = toFile.getParentFile(); + + /* + * Controllare che File sorgente esiste Esiste directory destinazione(che esista e sia + * directory) Non esiste file deestinazione + */ + + boolean sourceExists = false; + boolean targetDirExists = false; + boolean targetFileExists = false; + + if (fromFile != null) { + sourceExists = fromFile.exists(); + } + + if (toParent != null) { + targetDirExists = toParent.exists() && toParent.isDirectory(); + } + + if (toFile != null) { + targetFileExists = toFile.exists(); + } + + if (sourceExists && targetDirExists && !targetFileExists) { + + SURLStatusManager checker = SURLStatusManagerFactory.newSURLStatusManager(); + + if (checker.isSURLBusy(fromStori.getSURL())) { + log.debug("srmMv request failure: fromSURL is busy."); + explanation = "There is an active SrmPrepareToPut on from SURL."; + return CommandHelper.buildStatus(TStatusCode.SRM_FILE_BUSY, explanation); + } + + /** + * Check if there is an active SrmPrepareToGet on the source SURL. In that case SrmMv() fails + * with SRM_FILE_BUSY. + */ + + if (checker.isSURLPinned(fromStori.getSURL())) { + log.debug( + "SrmMv: requests fails because the source SURL is being used from other requests."); + explanation = "There is an active SrmPrepareToGet on from SURL"; + return CommandHelper.buildStatus(TStatusCode.SRM_FILE_BUSY, explanation); + } + + /** + * Perform the SrmMv() operation. 
+ */ + creationDone = fromFile.renameTo(toFile.getPath()); + + if (creationDone) { + log.debug("SrmMv: Request success!"); + explanation = "SURL moved with success"; + statusCode = TStatusCode.SRM_SUCCESS; + } else { + log.debug("SrmMv: Requests fails because the path is invalid."); + explanation = "Invalid path"; + statusCode = TStatusCode.SRM_INVALID_PATH; + } + + } else { + if (!sourceExists) { // and it is a file + log.debug("SrmMv: request fails because the source SURL does not exists!"); + explanation = "Source SURL does not exists!"; + statusCode = TStatusCode.SRM_INVALID_PATH; + } else { + if (!targetDirExists) { + log.debug("SrmMv: request fails because the target directory does not exitts."); + explanation = "Target directory does not exits!"; + statusCode = TStatusCode.SRM_INVALID_PATH; + } else { + if (targetFileExists) { + log.debug("SrmMv: request fails because the target SURL exists."); + explanation = "Target SURL exists!"; + statusCode = TStatusCode.SRM_DUPLICATION_ERROR; + } else { + log.debug("SrmMv request failure! 
That is a BUG!"); + explanation = "That is a bug!"; + statusCode = TStatusCode.SRM_INTERNAL_ERROR; + } + } + } + } + + return CommandHelper.buildStatus(statusCode, explanation); + } + + private void printRequestOutcome(TReturnStatus status, MvInputData inputData) { + + if (inputData != null) { + if (inputData.getFromSURL() != null && inputData.getToSURL() != null) { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, Arrays.asList( + new String[] {inputData.getFromSURL().toString(), inputData.getFromSURL().toString()})); + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); + } + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); + } + } } diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/RmCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/RmCommand.java index 3a41ffadd..c08e97d78 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/RmCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/RmCommand.java @@ -24,17 +24,14 @@ import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; -import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; import it.grid.storm.srm.types.ArrayOfTSURLReturnStatus; @@ -42,7 +39,6 @@ import 
it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TSURLReturnStatus; -import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.Command; import it.grid.storm.synchcall.command.CommandHelper; @@ -75,11 +71,11 @@ public class RmCommand implements Command { private static final String SRM_COMMAND = "srmRm"; private static final Logger log = LoggerFactory.getLogger(RmCommand.class); - private final NamespaceInterface namespace; + private final Namespace namespace; public RmCommand() { - namespace = NamespaceDirector.getNamespace(); + namespace = Namespace.getInstance(); } @@ -205,7 +201,7 @@ private TReturnStatus removeFile(TSURL surl, GridUserInterface user, RmInputData returnStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, "File removed"); try { - NamespaceDirector.getNamespace().resolveVFSbyLocalFile(localFile).decreaseUsedSpace(fileSize); + namespace.resolveVFSbyLocalFile(localFile).getSpaceUpdater().decreaseUsedSpace(fileSize); } catch (NamespaceException e) { log.error(e.getMessage()); returnStatus.extendExplaination("Unable to decrease used space: " + e.getMessage()); @@ -270,20 +266,6 @@ private StoRI resolveStoRI(TSURL surl, GridUserInterface user) throws RmExceptio private void checkUserAuthorization(StoRI stori, GridUserInterface user) throws RmException { - TSpaceToken token = stori.getVirtualFileSystem().getSpaceToken(); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - boolean isSpaceAuthorized; - if (isAnonymous(user)) { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.RM); - } else { - isSpaceAuthorized = spaceAuth.authorize(user, SRMSpaceRequest.RM); - } - if (!isSpaceAuthorized) { - log.debug("srmRm: User not authorized to perform srmRm on SA: {}", token); - throw new RmException(TStatusCode.SRM_AUTHORIZATION_FAILURE, - "User not authorized to perform srmRm request on the storage 
area"); - } AuthzDecision decision; if (isAnonymous(user)) { decision = diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/RmdirCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/RmdirCommand.java index eb485b8e1..6b097798b 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/RmdirCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/RmdirCommand.java @@ -24,21 +24,17 @@ import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; -import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; import it.grid.storm.srm.types.SRMCommandException; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.Command; import it.grid.storm.synchcall.command.CommandHelper; @@ -60,31 +56,33 @@ public RmdirException(TStatusCode code, String message) { } } + class TSize { - - private long size; - - TSize(long size) { - this.size = size; - } - - public void add(long n) { - size += n; - } - - public void dec(long n) { - size -= n; - } - - public long get() { - return size; - } - + + private long size; + + TSize(long size) { + this.size = size; + } + + public void add(long n) { + size += n; + } + + public void dec(long n) { + size -= n; + } + + public long get() { + return size; + } + } + /** - * This class is 
part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project + * This class is part of the StoRM project. Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project * * @author lucamag * @date May 27, 2008 @@ -92,253 +90,221 @@ public long get() { public class RmdirCommand extends DirectoryCommand implements Command { - + public static final Logger log = LoggerFactory.getLogger(RmdirCommand.class); - private static final String SRM_COMMAND = "srmRmdir"; - private final NamespaceInterface namespace; - - public RmdirCommand() { - - namespace = NamespaceDirector.getNamespace(); - - } - - /** - * Method that provide SrmRmdir functionality. - * - * @param inputData - * Contains information about input data for Rmdir request. - * @return OutputData Contains output data - */ - public OutputData execute(InputData data) { - - RmdirOutputData outputData = null; - log.debug("SrmRmdir: Start execution."); - checkInputData(data); - outputData = doRmdir((RmdirInputData) data); - log.debug("srmRmdir return status: {}", outputData.getStatus()); - printRequestOutcome(outputData.getStatus(), (RmdirInputData) data); - return outputData; - - } - - private RmdirOutputData doRmdir(RmdirInputData data) { - - TSURL surl = null; - GridUserInterface user = null; - StoRI stori = null; - TReturnStatus returnStatus = null; - boolean recursion = false; - TSize size = new TSize(0); - - try { - surl = getSURL(data); - user = getUser(data); - recursion = isRecursive(data); - stori = resolveStoRI(surl, user); - checkUserAuthorization(stori, user); - log.debug("srmRmdir: rmdir authorized for {}. Dir={}. 
Recursive={}", - userToString(user), stori.getPFN(), recursion); - returnStatus = removeFolder(stori.getLocalFile(), recursion, size); - log.debug("srmRmdir: decrease used space of {} bytes", size.get()); - try { - decreaseUsedSpace(stori.getLocalFile(), size.get()); - } catch (NamespaceException e) { - log.error("srmRmdir: {}", e.getMessage()); - returnStatus.extendExplaination("Unable to decrease used space: " - + e.getMessage()); - } - } catch (RmdirException e) { - log.error("srmRmdir: {}", e.getMessage()); - returnStatus = e.getReturnStatus(); - } - - log.debug("srmRmdir: returned status is {}", returnStatus); - return new RmdirOutputData(returnStatus); - } - - private void checkInputData(InputData data) - throws IllegalArgumentException { - - if (data == null) { - throw new IllegalArgumentException("Invalid input data: NULL"); - } - if (!(data instanceof RmdirInputData)) { - throw new IllegalArgumentException("Invalid input data type"); - } - } - - private StoRI resolveStoRI(TSURL surl, GridUserInterface user) - throws RmdirException { - - String formatStr = "Unable to build a stori for surl {} for user {}: {}"; - try { - return namespace.resolveStoRIbySURL(surl, user); - } catch (UnapprochableSurlException e) { - log.error(formatStr, surl, userToString(user), e.getMessage()); - throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, - e.getMessage()); - } catch (NamespaceException e) { - log.error(formatStr, surl, userToString(user), e.getMessage()); - throw new RmdirException(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage()); - } catch (InvalidSURLException e) { - log.error(formatStr, surl, userToString(user), e.getMessage()); - throw new RmdirException(TStatusCode.SRM_INVALID_PATH, e.getMessage()); - } catch (IllegalArgumentException e) { - log.error(formatStr, surl, userToString(user), e.getMessage()); - throw new RmdirException(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage()); - } - } - - private boolean isAnonymous(GridUserInterface user) { - - 
return (user == null); - } - - private String userToString(GridUserInterface user) { - - return isAnonymous(user) ? "anonymous" : user.getDn(); - } - - private void checkUserAuthorization(StoRI stori, GridUserInterface user) - throws RmdirException { - - TSpaceToken token = stori.getVirtualFileSystem().getSpaceToken(); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - boolean isSpaceAuthorized; - if (isAnonymous(user)) { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.RMD); - } else { - isSpaceAuthorized = spaceAuth.authorize(user, SRMSpaceRequest.RMD); - } - if (!isSpaceAuthorized) { - log.debug("srmRmdir: User not authorized to perform srmRmdir request " - + "on the storage area: {}", token); - throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, - "User is not authorized to remove the directory on the storage area " - + token); - } - - AuthzDecision decision; - if (isAnonymous(user)) { - decision = AuthzDirector.getPathAuthz().authorizeAnonymous( - SRMFileRequest.RMD, stori.getStFN()); - } else { - decision = AuthzDirector.getPathAuthz().authorize(user, - SRMFileRequest.RMD, stori); - } - if (!decision.equals(AuthzDecision.PERMIT)) { - log.debug("srmRmdir: User is not authorized to delete the directory"); - throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, - "User is not authorized to remove the directory"); - } - return; - } - - private GridUserInterface getUser(InputData data) { - - if (data instanceof IdentityInputData) { - return ((IdentityInputData) data).getUser(); - } - return null; - } - - private TSURL getSURL(RmdirInputData data) throws RmdirException { - - TSURL surl = ((RmdirInputData) data).getSurl(); - if (surl == null) { - throw new RmdirException(TStatusCode.SRM_FAILURE, - "SURL specified is NULL"); - } - if (surl.isEmpty()) { - throw new RmdirException(TStatusCode.SRM_FAILURE, - "SURL specified is empty"); - } - return surl; - } - - private boolean isRecursive(RmdirInputData 
data) { - - return data.getRecursive().booleanValue(); - } - - private void decreaseUsedSpace(LocalFile localFile, long sizeToRemove) - throws NamespaceException { - - NamespaceDirector.getNamespace().resolveVFSbyLocalFile(localFile) - .decreaseUsedSpace(sizeToRemove); - } - - private TReturnStatus removeFolder(LocalFile dir, boolean recursive, TSize size) - throws RmdirException { - - /* - * Check if dir exists and is a directory, if recursion is enabled when - * directory is not empty, etc... - */ - - if (!dir.exists()) { - return new TReturnStatus(TStatusCode.SRM_INVALID_PATH, - "Directory does not exists"); - } - if (!dir.isDirectory()) { - return new TReturnStatus(TStatusCode.SRM_INVALID_PATH, "Not a directory"); - } - if (!recursive && (dir.listFiles().length > 0)) { - return new TReturnStatus(TStatusCode.SRM_NON_EMPTY_DIRECTORY, - "Directory is not empty"); - } - - if (recursive) { - LocalFile[] list = dir.listFiles(); - log.debug("srmRmdir: removing {} content", dir); - for (LocalFile element : list) { - log.debug("srmRmdir: removing {}", element); - if (element.isDirectory()) { - removeFolder(element, recursive, size); - } else { - removeFile(element, size); - } - } - } - log.debug("srmRmdir: removing {}", dir); - removeEmptyDirectory(dir, size); - return new TReturnStatus(TStatusCode.SRM_SUCCESS, "Directory removed with success!"); - } - - private void removeEmptyDirectory(LocalFile directory, TSize size) - throws RmdirException { - - removeFile(directory, size); - } - - private void removeFile(LocalFile file, TSize size) throws RmdirException { - - long fileSize = file.length(); - if (!file.delete()) { - log.error("srmRmdir: Unable to delete {}", file); - throw new RmdirException(TStatusCode.SRM_FAILURE, - "Unable to delete " + file.getAbsolutePath()); - } - size.add(fileSize); - } - - private void printRequestOutcome(TReturnStatus status, - RmdirInputData inputData) { - - if (inputData != null) { - if (inputData.getSurl() != null) { - 
CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, - Arrays.asList(inputData.getSurl().toString())); - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); - } - - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); - } - } - -} \ No newline at end of file + private static final String SRM_COMMAND = "srmRmdir"; + private final Namespace namespace; + + public RmdirCommand() { + + namespace = Namespace.getInstance(); + + } + + /** + * Method that provide SrmRmdir functionality. + * + * @param inputData Contains information about input data for Rmdir request. + * @return OutputData Contains output data + */ + public OutputData execute(InputData data) { + + RmdirOutputData outputData = null; + log.debug("SrmRmdir: Start execution."); + checkInputData(data); + outputData = doRmdir((RmdirInputData) data); + log.debug("srmRmdir return status: {}", outputData.getStatus()); + printRequestOutcome(outputData.getStatus(), (RmdirInputData) data); + return outputData; + + } + + private RmdirOutputData doRmdir(RmdirInputData data) { + + TSURL surl = null; + GridUserInterface user = null; + StoRI stori = null; + TReturnStatus returnStatus = null; + boolean recursion = false; + TSize size = new TSize(0); + + try { + surl = getSURL(data); + user = getUser(data); + recursion = isRecursive(data); + stori = resolveStoRI(surl, user); + checkUserAuthorization(stori, user); + log.debug("srmRmdir: rmdir authorized for {}. Dir={}. 
Recursive={}", userToString(user), + stori.getPFN(), recursion); + returnStatus = removeFolder(stori.getLocalFile(), recursion, size); + log.debug("srmRmdir: decrease used space of {} bytes", size.get()); + try { + decreaseUsedSpace(stori.getLocalFile(), size.get()); + } catch (NamespaceException e) { + log.error("srmRmdir: {}", e.getMessage()); + returnStatus.extendExplaination("Unable to decrease used space: " + e.getMessage()); + } + } catch (RmdirException e) { + log.error("srmRmdir: {}", e.getMessage()); + returnStatus = e.getReturnStatus(); + } + + log.debug("srmRmdir: returned status is {}", returnStatus); + return new RmdirOutputData(returnStatus); + } + + private void checkInputData(InputData data) throws IllegalArgumentException { + + if (data == null) { + throw new IllegalArgumentException("Invalid input data: NULL"); + } + if (!(data instanceof RmdirInputData)) { + throw new IllegalArgumentException("Invalid input data type"); + } + } + + private StoRI resolveStoRI(TSURL surl, GridUserInterface user) throws RmdirException { + + String formatStr = "Unable to build a stori for surl {} for user {}: {}"; + try { + return namespace.resolveStoRIbySURL(surl, user); + } catch (UnapprochableSurlException e) { + log.error(formatStr, surl, userToString(user), e.getMessage()); + throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage()); + } catch (NamespaceException e) { + log.error(formatStr, surl, userToString(user), e.getMessage()); + throw new RmdirException(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage()); + } catch (InvalidSURLException e) { + log.error(formatStr, surl, userToString(user), e.getMessage()); + throw new RmdirException(TStatusCode.SRM_INVALID_PATH, e.getMessage()); + } catch (IllegalArgumentException e) { + log.error(formatStr, surl, userToString(user), e.getMessage()); + throw new RmdirException(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage()); + } + } + + private boolean isAnonymous(GridUserInterface user) { + + return 
(user == null); + } + + private String userToString(GridUserInterface user) { + + return isAnonymous(user) ? "anonymous" : user.getDn(); + } + + private void checkUserAuthorization(StoRI stori, GridUserInterface user) throws RmdirException { + + AuthzDecision decision; + if (isAnonymous(user)) { + decision = + AuthzDirector.getPathAuthz().authorizeAnonymous(SRMFileRequest.RMD, stori.getStFN()); + } else { + decision = AuthzDirector.getPathAuthz().authorize(user, SRMFileRequest.RMD, stori); + } + if (!decision.equals(AuthzDecision.PERMIT)) { + log.debug("srmRmdir: User is not authorized to delete the directory"); + throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, + "User is not authorized to remove the directory"); + } + return; + } + + private GridUserInterface getUser(InputData data) { + + if (data instanceof IdentityInputData) { + return ((IdentityInputData) data).getUser(); + } + return null; + } + + private TSURL getSURL(RmdirInputData data) throws RmdirException { + + TSURL surl = ((RmdirInputData) data).getSurl(); + if (surl == null) { + throw new RmdirException(TStatusCode.SRM_FAILURE, "SURL specified is NULL"); + } + if (surl.isEmpty()) { + throw new RmdirException(TStatusCode.SRM_FAILURE, "SURL specified is empty"); + } + return surl; + } + + private boolean isRecursive(RmdirInputData data) { + + return data.getRecursive().booleanValue(); + } + + private void decreaseUsedSpace(LocalFile localFile, long sizeToRemove) throws NamespaceException { + + namespace.resolveVFSbyLocalFile(localFile).getSpaceUpdater().decreaseUsedSpace(sizeToRemove); + } + + private TReturnStatus removeFolder(LocalFile dir, boolean recursive, TSize size) + throws RmdirException { + + /* + * Check if dir exists and is a directory, if recursion is enabled when directory is not empty, + * etc... 
+ */ + + if (!dir.exists()) { + return new TReturnStatus(TStatusCode.SRM_INVALID_PATH, "Directory does not exists"); + } + if (!dir.isDirectory()) { + return new TReturnStatus(TStatusCode.SRM_INVALID_PATH, "Not a directory"); + } + if (!recursive && (dir.listFiles().length > 0)) { + return new TReturnStatus(TStatusCode.SRM_NON_EMPTY_DIRECTORY, "Directory is not empty"); + } + + if (recursive) { + LocalFile[] list = dir.listFiles(); + log.debug("srmRmdir: removing {} content", dir); + for (LocalFile element : list) { + log.debug("srmRmdir: removing {}", element); + if (element.isDirectory()) { + removeFolder(element, recursive, size); + } else { + removeFile(element, size); + } + } + } + log.debug("srmRmdir: removing {}", dir); + removeEmptyDirectory(dir, size); + return new TReturnStatus(TStatusCode.SRM_SUCCESS, "Directory removed with success!"); + } + + private void removeEmptyDirectory(LocalFile directory, TSize size) throws RmdirException { + + removeFile(directory, size); + } + + private void removeFile(LocalFile file, TSize size) throws RmdirException { + + long fileSize = file.length(); + if (!file.delete()) { + log.error("srmRmdir: Unable to delete {}", file); + throw new RmdirException(TStatusCode.SRM_FAILURE, + "Unable to delete " + file.getAbsolutePath()); + } + size.add(fileSize); + } + + private void printRequestOutcome(TReturnStatus status, RmdirInputData inputData) { + + if (inputData != null) { + if (inputData.getSurl() != null) { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, + Arrays.asList(inputData.getSurl().toString())); + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); + } + + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); + } + } + +} diff --git a/src/main/java/it/grid/storm/synchcall/command/discovery/PingCommand.java b/src/main/java/it/grid/storm/synchcall/command/discovery/PingCommand.java index 2dcc73da4..ace8386c1 100644 --- 
a/src/main/java/it/grid/storm/synchcall/command/discovery/PingCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/discovery/PingCommand.java @@ -123,7 +123,7 @@ private Properties loadProperties() { Properties properties = new Properties(); Configuration config = Configuration.getInstance(); - String configurationPATH = config.namespaceConfigPath(); + String configurationPATH = config.getConfigurationDir().getAbsolutePath(); String pingPropertiesFileName = config.getPingValuesPropertiesFilename(); String propertiesFile = configurationPATH + File.separator + pingPropertiesFileName; diff --git a/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceMetaDataCommand.java b/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceMetaDataCommand.java index a6013fb7c..ea9bbefa8 100644 --- a/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceMetaDataCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceMetaDataCommand.java @@ -45,12 +45,11 @@ import it.grid.storm.synchcall.data.space.IdentityGetSpaceMetaDataInputData; /** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project + * This class is part of the StoRM project. Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project * - * This class represents the GetSpaceMetaDataManager Class. This class hava a - * reseveSpace method that perform all operation nedded to satisfy a SRM space - * release request. + * This class represents the GetSpaceMetaDataManager Class. This class has a reserveSpace method + * that performs all operations needed to satisfy a SRM space release request.
* * @author lucamag * @date May 29, 2008 @@ -59,174 +58,153 @@ public class GetSpaceMetaDataCommand extends SpaceCommand implements Command { - public static final Logger log = LoggerFactory - .getLogger(GetSpaceMetaDataCommand.class); - - private ReservedSpaceCatalog catalog = null; - - private static final String SRM_COMMAND = "srmGetSpaceMetaData"; - - /** - * Constructor. Bind the Executor with ReservedSpaceCatalog - */ - - public GetSpaceMetaDataCommand() { - - catalog = new ReservedSpaceCatalog(); - } - - /** - * - * @param data - * GetSpaceMetaDataInputData - * @return GetSpaceMetaDataOutputData - */ - public OutputData execute(InputData indata) { - - log.debug(""); - log.debug(" Updating SA with GPFS quotas results"); - GPFSQuotaManager.INSTANCE.triggerComputeQuotas(); - - IdentityGetSpaceMetaDataInputData data; - if (indata instanceof IdentityInputData) { - data = (IdentityGetSpaceMetaDataInputData) indata; - } else { - GetSpaceMetaDataOutputData outputData = new GetSpaceMetaDataOutputData(); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_NOT_SUPPORTED, "Anonymous user can not perform" - + SRM_COMMAND)); - printRequestOutcome(outputData.getStatus(), - (GetSpaceMetaDataInputData) indata); - return outputData; - } - int errorCount = 0; - ArrayOfTMetaDataSpace arrayData = new ArrayOfTMetaDataSpace(); - TReturnStatus globalStatus = null; - - TMetaDataSpace metadata = null; - - for (TSpaceToken token : data.getSpaceTokenArray().getTSpaceTokenArray()) { - StorageSpaceData spaceData = null; - try { - spaceData = catalog.getStorageSpace(token); - } catch (TransferObjectDecodingException e) { - log.error("Error getting storage space data for token {}. 
{}", - token, e.getMessage(),e); - metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, - "Error building space data from row DB data", data.getUser()); - errorCount++; - arrayData.addTMetaDataSpace(metadata); - continue; - - } catch (DataAccessException e) { - log.error("Error getting storage space data for token {}. {}", - token, e.getMessage(),e); - metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, - "Error retrieving row space token data from DB", data.getUser()); - errorCount++; - arrayData.addTMetaDataSpace(metadata); - continue; - } - if (spaceData != null) { - if (!spaceData.isInitialized()) { - log.warn("Uninitialized storage data found for token {}", token); - metadata = createFailureMetadata(token, TStatusCode.SRM_FAILURE, - "Storage Space not initialized yet", data.getUser()); - errorCount++; - } else { - try { - metadata = new TMetaDataSpace(spaceData); - } catch (InvalidTMetaDataSpaceAttributeException e) { - log.error("Metadata error. {}", e.getMessage(), e); - metadata = createFailureMetadata(token, - TStatusCode.SRM_INTERNAL_ERROR, - "Error building Storage Space Metadata from row data", - data.getUser()); - errorCount++; - } catch (InvalidTSizeAttributesException e) { - log.error("Metadata error. 
{}", e.getMessage(), e); - metadata = createFailureMetadata(token, - TStatusCode.SRM_INTERNAL_ERROR, - "Error building Storage Space Metadata from row data", - data.getUser()); - errorCount++; - } - } - } else { - log.warn("Unable to retrieve space data for token {}.",token); - metadata = createFailureMetadata(token, - TStatusCode.SRM_INVALID_REQUEST, "Space Token not found", - data.getUser()); - errorCount++; - } - arrayData.addTMetaDataSpace(metadata); - } - - boolean requestSuccess = (errorCount == 0); - boolean requestFailure = (errorCount == data.getSpaceTokenArray().size()); - - if (requestSuccess) { - globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, ""); - - log.info("srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " - + "done succesfully with: [status: {}]", data.getUser(), - data.getSpaceTokenArray(), globalStatus); - - } else { - if (requestFailure) { - globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, - "No valid space tokens"); - - log.info( - "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " - + "failed with: [status: {}]", data.getUser(), - data.getSpaceTokenArray(), globalStatus); - - } else { - - globalStatus = new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, - "Check space tokens statuses for details"); - - log.info( - "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " - + "partially done with: [status: {}]", data.getUser(), - data.getSpaceTokenArray(), globalStatus); - - } - } - - GetSpaceMetaDataOutputData response = null; - try { - response = new GetSpaceMetaDataOutputData(globalStatus, arrayData); - } catch (InvalidGetSpaceMetaDataOutputAttributeException e) { - log.error(e.getMessage(),e); - } - return response; - } - - private TMetaDataSpace createFailureMetadata(TSpaceToken token, - TStatusCode statusCode, String message, GridUserInterface user) { - - TMetaDataSpace metadata = TMetaDataSpace.makeEmpty(); - metadata.setSpaceToken(token); - - try { - metadata.setStatus(new 
TReturnStatus(statusCode, message)); - } catch (IllegalArgumentException e) { - log.error(e.getMessage(),e); - } - - return metadata; - } - - private void printRequestOutcome(TReturnStatus status, - GetSpaceMetaDataInputData inputData) { - - if (inputData != null) { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); - } - } + public static final Logger log = LoggerFactory.getLogger(GetSpaceMetaDataCommand.class); + + private ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); + + private static final String SRM_COMMAND = "srmGetSpaceMetaData"; + + /** + * + * @param data GetSpaceMetaDataInputData + * @return GetSpaceMetaDataOutputData + */ + public OutputData execute(InputData indata) { + + log.debug(""); + log.debug(" Updating SA with GPFS quotas results"); + GPFSQuotaManager.INSTANCE.triggerComputeQuotas(); + + IdentityGetSpaceMetaDataInputData data; + if (indata instanceof IdentityInputData) { + data = (IdentityGetSpaceMetaDataInputData) indata; + } else { + GetSpaceMetaDataOutputData outputData = new GetSpaceMetaDataOutputData(); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_NOT_SUPPORTED, + "Anonymous user can not perform" + SRM_COMMAND)); + printRequestOutcome(outputData.getStatus(), (GetSpaceMetaDataInputData) indata); + return outputData; + } + int errorCount = 0; + ArrayOfTMetaDataSpace arrayData = new ArrayOfTMetaDataSpace(); + TReturnStatus globalStatus = null; + + TMetaDataSpace metadata = null; + + for (TSpaceToken token : data.getSpaceTokenArray().getTSpaceTokenArray()) { + StorageSpaceData spaceData = null; + try { + spaceData = catalog.getStorageSpace(token); + } catch (TransferObjectDecodingException e) { + log.error("Error getting storage space data for token {}. 
{}", token, e.getMessage(), e); + metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, + "Error building space data from row DB data", data.getUser()); + errorCount++; + arrayData.addTMetaDataSpace(metadata); + continue; + + } catch (DataAccessException e) { + log.error("Error getting storage space data for token {}. {}", token, e.getMessage(), e); + metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, + "Error retrieving row space token data from DB", data.getUser()); + errorCount++; + arrayData.addTMetaDataSpace(metadata); + continue; + } + if (spaceData != null) { + if (!spaceData.isInitialized()) { + log.warn("Uninitialized storage data found for token {}", token); + metadata = createFailureMetadata(token, TStatusCode.SRM_FAILURE, + "Storage Space not initialized yet", data.getUser()); + errorCount++; + } else { + try { + metadata = new TMetaDataSpace(spaceData); + } catch (InvalidTMetaDataSpaceAttributeException e) { + log.error("Metadata error. {}", e.getMessage(), e); + metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, + "Error building Storage Space Metadata from row data", data.getUser()); + errorCount++; + } catch (InvalidTSizeAttributesException e) { + log.error("Metadata error. 
{}", e.getMessage(), e); + metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, + "Error building Storage Space Metadata from row data", data.getUser()); + errorCount++; + } + } + } else { + log.warn("Unable to retrieve space data for token {}.", token); + metadata = createFailureMetadata(token, TStatusCode.SRM_INVALID_REQUEST, + "Space Token not found", data.getUser()); + errorCount++; + } + arrayData.addTMetaDataSpace(metadata); + } + + boolean requestSuccess = (errorCount == 0); + boolean requestFailure = (errorCount == data.getSpaceTokenArray().size()); + + if (requestSuccess) { + globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, ""); + + log.info( + "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " + + "done succesfully with: [status: {}]", + data.getUser(), data.getSpaceTokenArray(), globalStatus); + + } else { + if (requestFailure) { + globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, "No valid space tokens"); + + log.info( + "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " + + "failed with: [status: {}]", + data.getUser(), data.getSpaceTokenArray(), globalStatus); + + } else { + + globalStatus = new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, + "Check space tokens statuses for details"); + + log.info( + "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " + + "partially done with: [status: {}]", + data.getUser(), data.getSpaceTokenArray(), globalStatus); + + } + } + + GetSpaceMetaDataOutputData response = null; + try { + response = new GetSpaceMetaDataOutputData(globalStatus, arrayData); + } catch (InvalidGetSpaceMetaDataOutputAttributeException e) { + log.error(e.getMessage(), e); + } + return response; + } + + private TMetaDataSpace createFailureMetadata(TSpaceToken token, TStatusCode statusCode, + String message, GridUserInterface user) { + + TMetaDataSpace metadata = TMetaDataSpace.makeEmpty(); + metadata.setSpaceToken(token); + + try { + metadata.setStatus(new 
TReturnStatus(statusCode, message)); + } catch (IllegalArgumentException e) { + log.error(e.getMessage(), e); + } + + return metadata; + } + + private void printRequestOutcome(TReturnStatus status, GetSpaceMetaDataInputData inputData) { + + if (inputData != null) { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); + } + } } diff --git a/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceTokensCommand.java b/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceTokensCommand.java index ee40eb5d5..8f0cab2e9 100644 --- a/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceTokensCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceTokensCommand.java @@ -36,9 +36,8 @@ import it.grid.storm.synchcall.data.space.GetSpaceTokensOutputData; /** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project * Execute the GetSpaceTokens - * request. + * This class is part of the StoRM project. Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project * Execute the GetSpaceTokens request. 
* * @author lucamag * @author Alberto Forti @@ -49,16 +48,10 @@ public class GetSpaceTokensCommand extends SpaceCommand implements Command { - public static final Logger log = LoggerFactory - .getLogger(GetSpaceTokensCommand.class); + public static final Logger log = LoggerFactory.getLogger(GetSpaceTokensCommand.class); private static final String SRM_COMMAND = "srmGetSpaceTokens"; - private ReservedSpaceCatalog catalog = null; - - public GetSpaceTokensCommand() { - - catalog = new ReservedSpaceCatalog(); - }; + private ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); public OutputData execute(InputData data) { @@ -68,11 +61,9 @@ public OutputData execute(InputData data) { inputData = (IdentityGetSpaceTokensInputData) data; } else { outputData = new GetSpaceTokensOutputData(); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_NOT_SUPPORTED, "Anonymous user can not perform" - + SRM_COMMAND)); - printRequestOutcome(outputData.getStatus(), - (GetSpaceTokensInputData) data); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_NOT_SUPPORTED, + "Anonymous user can not perform" + SRM_COMMAND)); + printRequestOutcome(outputData.getStatus(), (GetSpaceTokensInputData) data); return outputData; } @@ -84,13 +75,12 @@ public OutputData execute(InputData data) { if (user == null) { log.debug("GetSpaceTokens: the user field is NULL"); status = new TReturnStatus(TStatusCode.SRM_AUTHENTICATION_FAILURE, - "Unable to get user credential!"); + "Unable to get user credential!"); - log.error("srmGetSpaceTokens: <{}> " - + "Request for [spaceTokenDescription:{}] failed with: [status: {}]", - user, - inputData.getSpaceTokenAlias(), - status); + log.error( + "srmGetSpaceTokens: <{}> " + + "Request for [spaceTokenDescription:{}] failed with: [status: {}]", + user, inputData.getSpaceTokenAlias(), status); outputData = new GetSpaceTokensOutputData(status, null); return outputData; @@ -98,39 +88,33 @@ public OutputData execute(InputData data) { 
String spaceAlias = inputData.getSpaceTokenAlias(); log.debug("spaceAlias= {}", spaceAlias); - - ArrayOfTSpaceToken arrayOfSpaceTokens = catalog.getSpaceTokens(user, - spaceAlias); + + ArrayOfTSpaceToken arrayOfSpaceTokens = catalog.getSpaceTokens(user, spaceAlias); if (arrayOfSpaceTokens.size() == 0) { arrayOfSpaceTokens = catalog.getSpaceTokensByAlias(spaceAlias); } - if (arrayOfSpaceTokens.size() == 0) { - if (spaceAlias != null) { - status = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "'userSpaceTokenDescription' does not refer to an existing space"); - } else { - status = new TReturnStatus(TStatusCode.SRM_FAILURE, - "No space tokens owned by this user"); - } - arrayOfSpaceTokens = null; - } else { - status = new TReturnStatus(TStatusCode.SRM_SUCCESS, ""); - } + if (arrayOfSpaceTokens.size() == 0) { + if (spaceAlias != null) { + status = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "'userSpaceTokenDescription' does not refer to an existing space"); + } else { + status = new TReturnStatus(TStatusCode.SRM_FAILURE, "No space tokens owned by this user"); + } + arrayOfSpaceTokens = null; + } else { + status = new TReturnStatus(TStatusCode.SRM_SUCCESS, ""); + } if (status.isSRM_SUCCESS()) { - log.info("srmGetSpaceTokens: <{}> Request for [spaceTokenDescription: {}] " - + "succesfully done with: [status: {}]", - user, - inputData.getSpaceTokenAlias(), - status); + log.info( + "srmGetSpaceTokens: <{}> Request for [spaceTokenDescription: {}] " + + "succesfully done with: [status: {}]", + user, inputData.getSpaceTokenAlias(), status); } else { log.error("srmGetSpaceTokens: <{}> Request for [spaceTokenDescription: {}] " - + "failed with: [status: {}]", - user, - inputData.getSpaceTokenAlias(), - status); + + "failed with: [status: {}]", user, inputData.getSpaceTokenAlias(), status); } outputData = new GetSpaceTokensOutputData(status, arrayOfSpaceTokens); @@ -139,8 +123,7 @@ public OutputData execute(InputData data) { } - private void 
printRequestOutcome(TReturnStatus status, - GetSpaceTokensInputData data) { + private void printRequestOutcome(TReturnStatus status, GetSpaceTokensInputData data) { if (data != null) { CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, data); diff --git a/src/main/java/it/grid/storm/synchcall/command/space/ReleaseSpaceCommand.java b/src/main/java/it/grid/storm/synchcall/command/space/ReleaseSpaceCommand.java index 9bef19955..a4283d409 100644 --- a/src/main/java/it/grid/storm/synchcall/command/space/ReleaseSpaceCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/space/ReleaseSpaceCommand.java @@ -39,9 +39,8 @@ import org.slf4j.LoggerFactory; /** - * This class represents the ReleaseSpaceManager Class. This class hava a - * reseveSpace method that perform all operation nedded to satisfy a SRM space - * release request. + * This class represents the ReleaseSpaceManager Class. This class has a reserveSpace method that + * performs all operations needed to satisfy a SRM space release request.
* * @author Magnoni Luca * @author Cnaf -INFN Bologna @@ -51,21 +50,12 @@ public class ReleaseSpaceCommand extends SpaceCommand implements Command { - private final ReservedSpaceCatalog catalog; + private final ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); - /** - * Logger - */ - private static final Logger log = LoggerFactory - .getLogger(ReleaseSpaceCommand.class); + private static final Logger log = LoggerFactory.getLogger(ReleaseSpaceCommand.class); private static final String SRM_COMMAND = "srmReleaseSpace"; - public ReleaseSpaceCommand() { - - catalog = new ReservedSpaceCatalog(); - }; - public OutputData execute(InputData indata) { ReleaseSpaceOutputData outputData = new ReleaseSpaceOutputData(); @@ -73,20 +63,16 @@ public OutputData execute(InputData indata) { if (indata instanceof IdentityInputData) { inputData = (IdentityReleaseSpaceInputData) indata; } else { - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_NOT_SUPPORTED, "Anonymous user can not perform" - + SRM_COMMAND)); - printRequestOutcome(outputData.getStatus(), - (ReleaseSpaceInputData) indata); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_NOT_SUPPORTED, + "Anonymous user can not perform" + SRM_COMMAND)); + printRequestOutcome(outputData.getStatus(), (ReleaseSpaceInputData) indata); return outputData; } TReturnStatus returnStatus = null; - if ((inputData == null) - || ((inputData != null) && (inputData.getSpaceToken() == null))) { + if ((inputData == null) || ((inputData != null) && (inputData.getSpaceToken() == null))) { log.error("Empty space token."); - returnStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "SpaceToken is empty."); + returnStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, "SpaceToken is empty."); outputData.setStatus(returnStatus); return outputData; } @@ -95,11 +81,11 @@ public OutputData execute(InputData indata) { if (user == null) { log.debug("Null user credentials."); returnStatus = new 
TReturnStatus(TStatusCode.SRM_AUTHENTICATION_FAILURE, - "Unable to get user credential"); + "Unable to get user credential"); outputData.setStatus(returnStatus); - log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " - + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); + log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " + "with: [status: {}]", + user, inputData.getSpaceToken(), returnStatus); return outputData; } @@ -113,16 +99,16 @@ public OutputData execute(InputData indata) { try { data = catalog.getStorageSpace(inputData.getSpaceToken()); } catch (Throwable e) { - log.error("Error fetching data for space token {}. {}", - inputData.getSpaceToken(), e.getMessage(), e); + log.error("Error fetching data for space token {}. {}", inputData.getSpaceToken(), + e.getMessage(), e); explanation = "Error building space data from row DB data."; statusCode = TStatusCode.SRM_INTERNAL_ERROR; returnStatus = new TReturnStatus(statusCode, explanation); outputData.setStatus(returnStatus); - log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " - + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); + log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " + "with: [status: {}]", + user, inputData.getSpaceToken(), returnStatus); return outputData; } @@ -133,8 +119,8 @@ public OutputData execute(InputData indata) { returnStatus = new TReturnStatus(statusCode, explanation); outputData.setStatus(returnStatus); - log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " - + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); + log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " + "with: [status: {}]", + user, inputData.getSpaceToken(), returnStatus); return outputData; } @@ -170,13 +156,12 @@ public OutputData execute(InputData indata) { if (returnStatus.isSRM_SUCCESS()) { log.error("srmReleaseSpace: <{}> Request for 
[spacetoken: {}] succesfully done " - + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); - + + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); + } else { log.error("srmReleaseSpace: <" + user + "> Request for [spacetoken:" - + inputData.getSpaceToken() + "] for failed with: [status:" - + returnStatus + "]"); + + inputData.getSpaceToken() + "] for failed with: [status:" + returnStatus + "]"); } @@ -185,14 +170,12 @@ public OutputData execute(InputData indata) { /** * - * @param user - * GridUserInterface - * @param data - * StorageSpaceData + * @param user GridUserInterface + * @param data StorageSpaceData * @return TReturnStatus */ private TReturnStatus manageAuthorizedReleaseSpace(StorageSpaceData data, - GridUserInterface user) { + GridUserInterface user) { String spaceFileName; PFN pfn = data.getSpaceFileName(); @@ -206,19 +189,17 @@ private TReturnStatus manageAuthorizedReleaseSpace(StorageSpaceData data, return new TReturnStatus(TStatusCode.SRM_SUCCESS, "Space Released."); } else { return new TReturnStatus(TStatusCode.SRM_INTERNAL_ERROR, - "Space removed, but spaceToken was not found in the DB"); + "Space removed, but spaceToken was not found in the DB"); } } else { - return new TReturnStatus(TStatusCode.SRM_FAILURE, - "Space can not be removed by StoRM!"); + return new TReturnStatus(TStatusCode.SRM_FAILURE, "Space can not be removed by StoRM!"); } } else { - return new TReturnStatus(TStatusCode.SRM_FAILURE, "SRM Internal failure."); + return new TReturnStatus(TStatusCode.SRM_FAILURE, "SRM Internal failure."); } } - private void printRequestOutcome(TReturnStatus status, - ReleaseSpaceInputData indata) { + private void printRequestOutcome(TReturnStatus status, ReleaseSpaceInputData indata) { if (indata != null) { CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, indata); diff --git a/src/main/java/it/grid/storm/synchcall/command/space/ReserveSpaceCommand.java 
b/src/main/java/it/grid/storm/synchcall/command/space/ReserveSpaceCommand.java index 8aa35775a..d99ac914b 100644 --- a/src/main/java/it/grid/storm/synchcall/command/space/ReserveSpaceCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/space/ReserveSpaceCommand.java @@ -17,9 +17,13 @@ package it.grid.storm.synchcall.command.space; +import java.util.Date; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.acl.AclManager; import it.grid.storm.acl.AclManagerFS; -import it.grid.storm.catalogs.InvalidSpaceDataAttributesException; import it.grid.storm.catalogs.ReservedSpaceCatalog; import it.grid.storm.common.types.PFN; import it.grid.storm.common.types.SizeUnit; @@ -30,13 +34,13 @@ import it.grid.storm.griduser.CannotMapUserException; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.LocalUser; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.namespace.naming.NamespaceUtil; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.exceptions.InvalidSpaceDataAttributesException; import it.grid.storm.persistence.model.TransferObjectDecodingException; import it.grid.storm.space.StorageSpaceData; import it.grid.storm.srm.types.InvalidTSizeAttributesException; @@ -63,14 +67,9 @@ import it.grid.storm.synchcall.data.space.ReserveSpaceInputData; import it.grid.storm.synchcall.data.space.ReserveSpaceOutputData; -import java.util.Date; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project + * This class is part of the StoRM project. 
Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project * * @author lucamag * @author Riccardo Zappi @@ -81,10 +80,9 @@ public class ReserveSpaceCommand extends SpaceCommand implements Command { private ReservedSpaceCatalog catalog; - private static final Logger log = LoggerFactory - .getLogger(ReserveSpaceCommand.class); + private static final Logger log = LoggerFactory.getLogger(ReserveSpaceCommand.class); - private NamespaceInterface namespace; + private Namespace namespace; private static final String SRM_COMMAND = "srmReserveSpace"; @@ -92,14 +90,15 @@ public class ReserveSpaceCommand extends SpaceCommand implements Command { String explanation = null; private void logRequestSuccess(GridUserInterface user, TSizeInBytes desSize, - TSizeInBytes guarSize, TLifeTimeInSeconds lifetime, - TRetentionPolicyInfo rpinfo, TReturnStatus status) { - - log.info("srmReservespace: <{}> Request for [desiredSizeOfTotalSpace: {}," - + " desiredSizeOfGuaranteedSpace: {}] with " - + "[desiredLifetimeOfReservedSpace: {}, retentionPolicyInfo: {}]" - + "succesfully done with: [status: {}]", user, desSize, guarSize, - lifetime, rpinfo, status); + TSizeInBytes guarSize, TLifeTimeInSeconds lifetime, TRetentionPolicyInfo rpinfo, + TReturnStatus status) { + + log.info( + "srmReservespace: <{}> Request for [desiredSizeOfTotalSpace: {}," + + " desiredSizeOfGuaranteedSpace: {}] with " + + "[desiredLifetimeOfReservedSpace: {}, retentionPolicyInfo: {}]" + + "succesfully done with: [status: {}]", + user, desSize, guarSize, lifetime, rpinfo, status); } private void logRequestFailure(TStatusCode code, String explanation) { @@ -109,29 +108,27 @@ private void logRequestFailure(TStatusCode code, String explanation) { } private void logRequestFailure(GridUserInterface user, TSizeInBytes desSize, - TSizeInBytes guarSize, TLifeTimeInSeconds lifetime, - TRetentionPolicyInfo rpinfo, TStatusCode code, String explanation) { + TSizeInBytes guarSize, TLifeTimeInSeconds lifetime, 
TRetentionPolicyInfo rpinfo, + TStatusCode code, String explanation) { TReturnStatus status = new TReturnStatus(code, explanation); log.error("srmReservespace: <{}> Request for [desiredSizeOfTotalSpace: {}," - + " desiredSizeOfGuaranteedSpace: {}] with " - + "[desiredLifetimeOfReservedSpace: {}, retentionPolicyInfo: {}]" - + "failed with: [status: {}]", user, desSize, guarSize, lifetime, rpinfo, - status); + + " desiredSizeOfGuaranteedSpace: {}] with " + + "[desiredLifetimeOfReservedSpace: {}, retentionPolicyInfo: {}]" + + "failed with: [status: {}]", user, desSize, guarSize, lifetime, rpinfo, status); } public ReserveSpaceCommand() { - namespace = NamespaceDirector.getNamespace(); - catalog = new ReservedSpaceCatalog(); + namespace = Namespace.getInstance(); + catalog = ReservedSpaceCatalog.getInstance(); } /** * Method that provide space reservation for srmReserveSpace request. * - * @param data - * Contain information about data procived in SRM request. + * @param data Contain information about data procived in SRM request. * @return SpaceResOutputData that contain all SRM return parameter. 
* @todo Implement this it.grid.storm.synchcall.space.SpaceManager method */ @@ -142,11 +139,9 @@ public OutputData execute(InputData indata) { data = (IdentityReserveSpaceInputData) indata; } else { GetSpaceMetaDataOutputData outputData = new GetSpaceMetaDataOutputData(); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_NOT_SUPPORTED, "Anonymous user can not perform" - + SRM_COMMAND)); - printRequestOutcome(outputData.getStatus(), - (ReserveSpaceInputData) indata); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_NOT_SUPPORTED, + "Anonymous user can not perform" + SRM_COMMAND)); + printRequestOutcome(outputData.getStatus(), (ReserveSpaceInputData) indata); return outputData; } log.debug(":reserveSpace start."); @@ -161,23 +156,21 @@ public OutputData execute(InputData indata) { } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } - VirtualFSInterface vfs = null; + VirtualFS vfs = null; try { vfs = getSpaceVFS(spaceFN); } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } @@ -190,43 +183,37 @@ public OutputData execute(InputData indata) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), 
data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } SpaceSize spaceSize = null; try { - spaceSize = computeSpaceSize(data.getDesiredSize(), - data.getGuaranteedSize(), vfs); + spaceSize = computeSpaceSize(data.getDesiredSize(), data.getGuaranteedSize(), vfs); } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } StoRI spaceStori = null; try { - spaceStori = getSpaceStoRI(vfs, relativeSpaceFN, - spaceSize.getDesiderataSpaceSize()); + spaceStori = getSpaceStoRI(vfs, relativeSpaceFN, spaceSize.getDesiderataSpaceSize()); } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } - log - .debug("Reserve Space File Size: {}", spaceSize.getDesiderataSpaceSize()); + log.debug("Reserve Space File Size: {}", spaceSize.getDesiderataSpaceSize()); try { spaceStori.getSpace().fakeAllot(); @@ -235,9 +222,8 @@ public OutputData execute(InputData indata) { statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to create Space File into filesystem. 
\n"; - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } @@ -248,9 +234,8 @@ public OutputData execute(InputData indata) { } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); revertAllocation(spaceStori.getSpace()); return manageError(statusCode, explanation); @@ -258,15 +243,14 @@ public OutputData execute(InputData indata) { TSpaceToken spaceToken = null; try { - spaceToken = registerIntoDB(data.getUser(), data.getSpaceTokenAlias(), - spaceSize.getTotalSize(), spaceSize.getDesiderataSpaceSize(), - data.getSpaceLifetime(), spaceStori.getPFN()); + spaceToken = + registerIntoDB(data.getUser(), data.getSpaceTokenAlias(), spaceSize.getTotalSize(), + spaceSize.getDesiderataSpaceSize(), data.getSpaceLifetime(), spaceStori.getPFN()); } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); revertAllocation(spaceStori.getSpace()); return manageError(statusCode, explanation); @@ -276,16 +260,14 @@ public OutputData execute(InputData indata) { try { output = 
buildOutput(spaceSize, spaceToken, data.getSpaceLifetime()); - logRequestSuccess(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), output.getStatus()); + logRequestSuccess(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), output.getStatus()); } catch (Exception e) { statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to build a valid output object "; - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); revertAllocation(spaceStori.getSpace()); return manageError(statusCode, explanation); } @@ -301,8 +283,8 @@ private void revertAllocation(Space space) { } } - private StoRI getSpaceStoRI(VirtualFSInterface vfs, String relativeSpaceFN, - TSizeInBytes desiderataSpaceSize) throws Exception { + private StoRI getSpaceStoRI(VirtualFS vfs, String relativeSpaceFN, + TSizeInBytes desiderataSpaceSize) throws Exception { StoRI spaceFile = null; try { @@ -338,31 +320,28 @@ private boolean checkParameters(IdentityReserveSpaceInputData data) { log.debug("Null retentionPolicyInfo."); statusCode = TStatusCode.SRM_INVALID_REQUEST; explanation = "RetentionPolicy not specified."; - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return false; } TAccessLatency latency = data.getRetentionPolicyInfo().getAccessLatency(); - TRetentionPolicy retentionPolicy = 
data.getRetentionPolicyInfo() - .getRetentionPolicy(); + TRetentionPolicy retentionPolicy = data.getRetentionPolicyInfo().getRetentionPolicy(); - if (!((latency == null || latency.equals(TAccessLatency.EMPTY) || latency - .equals(TAccessLatency.ONLINE)) && (retentionPolicy == null - || retentionPolicy.equals(TRetentionPolicy.EMPTY) || retentionPolicy - .equals(TRetentionPolicy.REPLICA)))) { + if (!((latency == null || latency.equals(TAccessLatency.EMPTY) + || latency.equals(TAccessLatency.ONLINE)) + && (retentionPolicy == null || retentionPolicy.equals(TRetentionPolicy.EMPTY) + || retentionPolicy.equals(TRetentionPolicy.REPLICA)))) { - log.debug("Invalid retentionPolicyInfo: {}, {}", data - .getRetentionPolicyInfo().getAccessLatency(), data - .getRetentionPolicyInfo().getRetentionPolicy()); + log.debug("Invalid retentionPolicyInfo: {}, {}", + data.getRetentionPolicyInfo().getAccessLatency(), + data.getRetentionPolicyInfo().getRetentionPolicy()); statusCode = TStatusCode.SRM_NOT_SUPPORTED; explanation = "RetentionPolicy requested cannot be satisfied."; - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return false; } @@ -384,9 +363,9 @@ private String getSpaceFN(GridUserInterface user) throws Exception { return spaceFN; } - private VirtualFSInterface getSpaceVFS(String spaceFN) throws Exception { + private VirtualFS getSpaceVFS(String spaceFN) throws Exception { - VirtualFSInterface vfs = null; + VirtualFS vfs = null; try { vfs = namespace.resolveVFSbyAbsolutePath(spaceFN); log.debug("Space File belongs to VFS : {}", vfs.getAliasName()); @@ -400,19 +379,15 @@ private VirtualFSInterface getSpaceVFS(String spaceFN) throws Exception { return vfs; } - private void 
setDefaults(IdentityReserveSpaceInputData data, - VirtualFSInterface vfs) { + private void setDefaults(IdentityReserveSpaceInputData data, VirtualFS vfs) { if (data.getRetentionPolicyInfo().getAccessLatency() == null - || data.getRetentionPolicyInfo().getAccessLatency() - .equals(TAccessLatency.EMPTY)) { + || data.getRetentionPolicyInfo().getAccessLatency().equals(TAccessLatency.EMPTY)) { data.getRetentionPolicyInfo().setAccessLatency(TAccessLatency.ONLINE); } if (data.getRetentionPolicyInfo().getRetentionPolicy() == null - || data.getRetentionPolicyInfo().getRetentionPolicy() - .equals(TRetentionPolicy.EMPTY)) { - data.getRetentionPolicyInfo() - .setRetentionPolicy(TRetentionPolicy.REPLICA); + || data.getRetentionPolicyInfo().getRetentionPolicy().equals(TRetentionPolicy.EMPTY)) { + data.getRetentionPolicyInfo().setRetentionPolicy(TRetentionPolicy.REPLICA); } if (data.getSpaceLifetime().isEmpty()) { log.debug("LifeTime is EMPTY. Using default value."); @@ -420,13 +395,12 @@ private void setDefaults(IdentityReserveSpaceInputData data, } } - private SpaceSize computeSpaceSize(TSizeInBytes totalSize, - TSizeInBytes guarSize, VirtualFSInterface vfs) throws Exception { + private SpaceSize computeSpaceSize(TSizeInBytes totalSize, TSizeInBytes guarSize, + VirtualFS vfs) throws Exception { TSizeInBytes desiderataSpaceSize = TSizeInBytes.makeEmpty(); - if ((!(totalSize.isEmpty())) - && (!((guarSize.isEmpty()) || guarSize.value() == 0))) { + if ((!(totalSize.isEmpty())) && (!((guarSize.isEmpty()) || guarSize.value() == 0))) { if (totalSize.value() < guarSize.value()) { log.debug("Error: totalSize < guaranteedSize"); statusCode = TStatusCode.SRM_INVALID_REQUEST; @@ -461,8 +435,8 @@ private SpaceSize computeSpaceSize(TSizeInBytes totalSize, } /* - * At this point either totalSize and guarSize contains significative value. - * desiderataSpaceSize is setted to totalSize. + * At this point either totalSize and guarSize contains significative value. 
desiderataSpaceSize + * is setted to totalSize. */ desiderataSpaceSize = totalSize; // This is valid because StoRM only reserve GUARANTEED space. @@ -470,23 +444,20 @@ private SpaceSize computeSpaceSize(TSizeInBytes totalSize, TSizeInBytes freeSpace = null; try { - freeSpace = TSizeInBytes.make(vfs.getFilesystem().getFreeSpace(), - SizeUnit.BYTES); + freeSpace = TSizeInBytes.make(vfs.getFilesystem().getFreeSpace(), SizeUnit.BYTES); } catch (InvalidTSizeAttributesException e) { - log - .debug("Error while retrieving free Space in underlying Filesystem", e); + log.debug("Error while retrieving free Space in underlying Filesystem", e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error while retrieving free Space in underlying Filesystem \n" - + e; + explanation = "Error while retrieving free Space in underlying Filesystem \n" + e; throw new Exception(explanation); } catch (NamespaceException ex) { - log - .debug( + log.debug( "Error while retrieving free Space in underlying Filesystem. Unable to retrieve FS Driver", ex); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error while retrieving free Space in underlying Filesystem. Unable to retrieve FS Driver \n" - + ex; + explanation = + "Error while retrieving free Space in underlying Filesystem. 
Unable to retrieve FS Driver \n" + + ex; throw new Exception(explanation); } @@ -498,8 +469,7 @@ private SpaceSize computeSpaceSize(TSizeInBytes totalSize, if (freeSpace.value() < desiderataSpaceSize.value()) { if (freeSpace.value() < guarSize.value()) { // Not enough freespace - log - .debug(":reserveSpace Not Enough Free Space on storage!"); + log.debug(":reserveSpace Not Enough Free Space on storage!"); statusCode = TStatusCode.SRM_NO_FREE_SPACE; explanation = "SRM has not more free space."; throw new Exception(explanation); @@ -512,21 +482,18 @@ private SpaceSize computeSpaceSize(TSizeInBytes totalSize, return this.new SpaceSize(desiderataSpaceSize, totalSize, lower_space); } - private String getRelativeSpaceFilePath(VirtualFSInterface vfs, String spaceFN) - throws Exception { + private String getRelativeSpaceFilePath(VirtualFS vfs, String spaceFN) throws Exception { String relativeSpaceFN = null; - relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), - spaceFN); + relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), spaceFN); log.debug("relativeSpaceFN: {}", relativeSpaceFN); return relativeSpaceFN; } - private void setSpaceFilePermissions(StoRI spaceStori, GridUserInterface user) - throws Exception { + private void setSpaceFilePermissions(StoRI spaceStori, GridUserInterface user) throws Exception { FilesystemPermission fp = FilesystemPermission.ReadWrite; @@ -542,8 +509,7 @@ private void setSpaceFilePermissions(StoRI spaceStori, GridUserInterface user) throw new Exception(explanation); } if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} , localUser={}", localFile, - localUser); + log.error("ACL setup error. 
localFile={} , localUser={}", localFile, localUser); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; throw new Exception(explanation); @@ -569,23 +535,21 @@ private void setSpaceFilePermissions(StoRI spaceStori, GridUserInterface user) } } - private TSpaceToken registerIntoDB(GridUserInterface user, - String spaceTokenAlias, TSizeInBytes totalSize, - TSizeInBytes desiderataSpaceSize, TLifeTimeInSeconds lifeTime, PFN pfn) - throws Exception { + private TSpaceToken registerIntoDB(GridUserInterface user, String spaceTokenAlias, + TSizeInBytes totalSize, TSizeInBytes desiderataSpaceSize, TLifeTimeInSeconds lifeTime, + PFN pfn) throws Exception { StorageSpaceData spaceData = null; try { - spaceData = new StorageSpaceData(user, TSpaceType.PERMANENT, - spaceTokenAlias, totalSize, desiderataSpaceSize, lifeTime, null, - new Date(), pfn); + spaceData = new StorageSpaceData(user, TSpaceType.PERMANENT, spaceTokenAlias, totalSize, + desiderataSpaceSize, lifeTime, null, new Date(), pfn); } catch (InvalidSpaceDataAttributesException e) { log.debug("Unable to create Storage Space Data", e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to create storage space data."; - logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, - statusCode, explanation); + logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, statusCode, + explanation); throw new Exception(explanation); } @@ -601,8 +565,8 @@ private TSpaceToken registerIntoDB(GridUserInterface user, log.debug("Unable to register Storage Space Data into DB", e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to register Storage Space Data into DB."; - logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, - statusCode, explanation); + logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, statusCode, + explanation); throw new Exception(explanation); } @@ -614,31 +578,30 @@ private 
TSpaceToken registerIntoDB(GridUserInterface user, statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to create space token."; - logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, - statusCode, explanation); + logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, statusCode, + explanation); throw new Exception(explanation); } return spaceToken; } - private ReserveSpaceOutputData buildOutput(SpaceSize spaceSize, - TSpaceToken spaceToken, TLifeTimeInSeconds lifeTime) throws Exception { + private ReserveSpaceOutputData buildOutput(SpaceSize spaceSize, TSpaceToken spaceToken, + TLifeTimeInSeconds lifeTime) throws Exception { TReturnStatus status = null; - if (!spaceSize.isLowerSpace()) { - status = new TReturnStatus(TStatusCode.SRM_SUCCESS, - "Space Reservation done"); + if (!spaceSize.isLowerSpace()) { + status = new TReturnStatus(TStatusCode.SRM_SUCCESS, "Space Reservation done"); - } else { - status = new TReturnStatus(TStatusCode.SRM_LOWER_SPACE_GRANTED, - "Space Reservation done, lower space granted."); - } + } else { + status = new TReturnStatus(TStatusCode.SRM_LOWER_SPACE_GRANTED, + "Space Reservation done, lower space granted."); + } ReserveSpaceOutputData outputData = null; try { outputData = new ReserveSpaceOutputData(spaceSize.getTotalSize(), - spaceSize.getDesiderataSpaceSize(), lifeTime, spaceToken, status); + spaceSize.getDesiderataSpaceSize(), lifeTime, spaceToken, status); } catch (InvalidReserveSpaceOutputDataAttributesException e) { log.error(e.getMessage(), e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; @@ -654,8 +617,7 @@ private class SpaceSize { private final TSizeInBytes totalSize; private final boolean lowerSpace; - public SpaceSize(TSizeInBytes desiderataSpaceSize, TSizeInBytes totalSize, - boolean lowerSpace) { + public SpaceSize(TSizeInBytes desiderataSpaceSize, TSizeInBytes totalSize, boolean lowerSpace) { this.desiderataSpaceSize = desiderataSpaceSize; this.totalSize = totalSize; @@ 
-681,9 +643,7 @@ protected boolean isLowerSpace() { /** * Method that reset an already done reservation to the original status. * - * @param token - * TSpaceToken that contains information about data procived in SRM - * request. + * @param token TSpaceToken that contains information about data procived in SRM request. * @return TReturnStatus that contains of all SRM return parameters. */ public TReturnStatus resetReservation(TSpaceToken token) { @@ -715,7 +675,7 @@ public TReturnStatus resetReservation(TSpaceToken token) { String spaceFN = spacePFN.toString(); - VirtualFSInterface vfs = null; + VirtualFS vfs = null; try { vfs = namespace.resolveVFSbyAbsolutePath(spaceFN); log.debug("Space File belongs to VFS : {}", vfs.getAliasName()); @@ -727,8 +687,7 @@ public TReturnStatus resetReservation(TSpaceToken token) { } String relativeSpaceFN = null; - relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), - spaceFN); + relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), spaceFN); log.debug("relativeSpaceFN: {}", relativeSpaceFN); @@ -741,8 +700,7 @@ public TReturnStatus resetReservation(TSpaceToken token) { StoRI spaceFile = null; try { - spaceFile = vfs.createSpace(relativeSpaceFN, - desiderataSpaceSize.value()); + spaceFile = vfs.createSpace(relativeSpaceFN, desiderataSpaceSize.value()); } catch (NamespaceException e) { log.debug(e.getMessage(), e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; @@ -768,8 +726,7 @@ public TReturnStatus resetReservation(TSpaceToken token) { LocalFile localFile = spaceFile.getLocalFile(); LocalUser localUser = user.getLocalUser(); if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} localUser={}", localFile, - localUser); + log.error("ACL setup error. 
localFile={} localUser={}", localFile, localUser); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; return manageErrorStatus(statusCode, explanation); @@ -795,8 +752,7 @@ public TReturnStatus resetReservation(TSpaceToken token) { LocalFile localFile = spaceFile.getLocalFile(); LocalUser localUser = user.getLocalUser(); if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} localUser={}", localFile, - localUser); + log.error("ACL setup error. localFile={} localUser={}", localFile, localUser); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; return manageErrorStatus(statusCode, explanation); @@ -827,16 +783,14 @@ public TReturnStatus resetReservation(TSpaceToken token) { } catch (DataAccessException e) { log.error(e.getMessage(), e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error persisting space token data into the DB\n" - + e.getMessage(); + explanation = "Error persisting space token data into the DB\n" + e.getMessage(); return manageErrorStatus(statusCode, explanation); } return manageErrorStatus(TStatusCode.SRM_SUCCESS, "Successfull creation."); } - public TReturnStatus updateReservation(TSpaceToken token, - TSizeInBytes sizeToAdd, TSURL toSurl) { + public TReturnStatus updateReservation(TSpaceToken token, TSizeInBytes sizeToAdd, TSURL toSurl) { String explanation = null; TStatusCode statusCode = TStatusCode.EMPTY; @@ -865,7 +819,7 @@ public TReturnStatus updateReservation(TSpaceToken token, String spaceFN = null; spaceFN = spacePFN.toString(); - VirtualFSInterface vfs = null; + VirtualFS vfs = null; try { vfs = namespace.resolveVFSbyAbsolutePath(spaceFN); log.debug("Space File belongs to VFS : {}", vfs.getAliasName()); @@ -878,8 +832,7 @@ public TReturnStatus updateReservation(TSpaceToken token, String relativeSpaceFN = null; - relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), - spaceFN); + 
relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), spaceFN); TSizeInBytes desiderataSpaceSize = sdata.getTotalSpaceSize(); TSizeInBytes availableSize = sdata.getAvailableSpaceSize(); @@ -888,8 +841,8 @@ public TReturnStatus updateReservation(TSpaceToken token, log.debug("Size of removed file: {}" + sizeToAdd.value()); try { - desiderataSpaceSize = TSizeInBytes.make( - availableSize.value() + sizeToAdd.value(), SizeUnit.BYTES); + desiderataSpaceSize = + TSizeInBytes.make(availableSize.value() + sizeToAdd.value(), SizeUnit.BYTES); } catch (InvalidTSizeAttributesException e) { log.error(e.getMessage()); } @@ -932,8 +885,7 @@ public TReturnStatus updateReservation(TSpaceToken token, localFile = spaceFile.getLocalFile(); LocalUser localUser = user.getLocalUser(); if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} localUser={}", localFile, - localUser); + log.error("ACL setup error. localFile={} localUser={}", localFile, localUser); revertOldSpaceFileDeletion(localFile); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; @@ -962,16 +914,14 @@ public TReturnStatus updateReservation(TSpaceToken token, localFile = spaceFile.getLocalFile(); LocalUser localUser = user.getLocalUser(); if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} localUser={}", localFile, - localUser); + log.error("ACL setup error. 
localFile={} localUser={}", localFile, localUser); revertOldSpaceFileDeletion(localFile); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; return manageErrorStatus(statusCode, explanation); } else { try { - manager.grantGroupPermission(spaceFile.getLocalFile(), localUser, - fp); + manager.grantGroupPermission(spaceFile.getLocalFile(), localUser, fp); } catch (IllegalArgumentException e) { log.error(e.getMessage(), e); revertOldSpaceFileDeletion(localFile); @@ -990,14 +940,13 @@ public TReturnStatus updateReservation(TSpaceToken token, } try { - availableSize = TSizeInBytes.make(sdata.getAvailableSpaceSize().value() - + sizeToAdd.value(), SizeUnit.BYTES); + availableSize = TSizeInBytes.make(sdata.getAvailableSpaceSize().value() + sizeToAdd.value(), + SizeUnit.BYTES); } catch (InvalidTSizeAttributesException e) { log.error(e.getMessage(), e); revertOldSpaceFileDeletion(localFile); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error computing new available space size\n" - + e.getMessage(); + explanation = "Error computing new available space size\n" + e.getMessage(); return manageErrorStatus(statusCode, explanation); } @@ -1009,8 +958,7 @@ public TReturnStatus updateReservation(TSpaceToken token, log.error(e.getMessage(), e); revertOldSpaceFileDeletion(localFile); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error persisting space token data into the DB\n" - + e.getMessage(); + explanation = "Error persisting space token data into the DB\n" + e.getMessage(); return manageErrorStatus(statusCode, explanation); } return manageErrorStatus(TStatusCode.SRM_SUCCESS, "Successfull creation."); @@ -1020,8 +968,7 @@ private void revertOldSpaceFileDeletion(LocalFile localFile) { } - private ReserveSpaceOutputData manageError(TStatusCode statusCode, - String explanation) { + private ReserveSpaceOutputData manageError(TStatusCode statusCode, String explanation) { TReturnStatus status = null; try { @@ 
-1033,8 +980,7 @@ private ReserveSpaceOutputData manageError(TStatusCode statusCode, return new ReserveSpaceOutputData(status); } - private TReturnStatus manageErrorStatus(TStatusCode statusCode, - String explanation) { + private TReturnStatus manageErrorStatus(TStatusCode statusCode, String explanation) { TReturnStatus status = null; try { @@ -1045,8 +991,7 @@ private TReturnStatus manageErrorStatus(TStatusCode statusCode, return status; } - private void printRequestOutcome(TReturnStatus status, - ReserveSpaceInputData data) { + private void printRequestOutcome(TReturnStatus status, ReserveSpaceInputData data) { if (data != null) { CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, data); diff --git a/src/main/java/it/grid/storm/synchcall/data/datatransfer/AbortRequestOutputData.java b/src/main/java/it/grid/storm/synchcall/data/datatransfer/AbortRequestOutputData.java index 6ef5a8bf6..66979905d 100644 --- a/src/main/java/it/grid/storm/synchcall/data/datatransfer/AbortRequestOutputData.java +++ b/src/main/java/it/grid/storm/synchcall/data/datatransfer/AbortRequestOutputData.java @@ -27,61 +27,49 @@ package it.grid.storm.synchcall.data.datatransfer; import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.synchcall.data.OutputData; public class AbortRequestOutputData extends AbortGeneralOutputData { - private TReturnStatus returnStatus = null; + private TReturnStatus returnStatus = null; - public AbortRequestOutputData() { + public AbortRequestOutputData() { - } + } - public AbortRequestOutputData(TReturnStatus retStatus) - // throws InvalidAbortRequestOutputDataAttributeException - { + public AbortRequestOutputData(TReturnStatus retStatus) { - boolean ok = (retStatus == null); + this.returnStatus = retStatus; + } - if (!ok) { - ;// throw new InvalidAbortRequestOutputDataAttributeException(retStatus); - } + public static AbortRequestOutputData make(AbortGeneralOutputData generalOutData) { - this.returnStatus = retStatus; - } + return new 
AbortRequestOutputData(generalOutData.getReturnStatus()); + } - public static AbortRequestOutputData make( - AbortGeneralOutputData generalOutData) { + /** + * Returns the returnStatus field + * + * @return TReturnStatus + */ + public TReturnStatus getReturnStatus() { - // Create an output data from an AbortFiles output data. - // new AbortRequestOutputData(generalOutData.getReturnStatus()); - return new AbortRequestOutputData(generalOutData.getReturnStatus()); - } + return returnStatus; + } - /** - * Returns the returnStatus field - * - * @return TReturnStatus - */ - public TReturnStatus getReturnStatus() { + /** + * Set the returnStatus field + * + * @param returnStatus + */ + public void setReturnStatus(TReturnStatus returnStatus) { - return returnStatus; - } + this.returnStatus = returnStatus; + } - /** - * Set the returnStatus field - * - * @param returnStatus - */ - public void setReturnStatus(TReturnStatus returnStatus) { + public boolean isSuccess() { - this.returnStatus = returnStatus; - } - - public boolean isSuccess() { - - // TODO Auto-generated method stub - return false; - } + // TODO Auto-generated method stub + return false; + } } diff --git a/src/main/java/it/grid/storm/synchcall/data/datatransfer/AnonymousPrepareToPutInputData.java b/src/main/java/it/grid/storm/synchcall/data/datatransfer/AnonymousPrepareToPutInputData.java index 34168f7d9..848cbb304 100644 --- a/src/main/java/it/grid/storm/synchcall/data/datatransfer/AnonymousPrepareToPutInputData.java +++ b/src/main/java/it/grid/storm/synchcall/data/datatransfer/AnonymousPrepareToPutInputData.java @@ -17,10 +17,10 @@ package it.grid.storm.synchcall.data.datatransfer; -import it.grid.storm.catalogs.OverwriteModeConverter; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.common.types.TimeUnit; import it.grid.storm.config.Configuration; +import it.grid.storm.persistence.converter.OverwriteModeConverter; import it.grid.storm.srm.types.TLifeTimeInSeconds; import 
it.grid.storm.srm.types.TOverwriteMode; import it.grid.storm.srm.types.TSURL; @@ -30,91 +30,73 @@ * @author Michele Dibenedetto * */ -public class AnonymousPrepareToPutInputData extends - AnonymousFileTransferInputData implements PrepareToPutInputData { - - private TOverwriteMode overwriteMode = OverwriteModeConverter.getInstance() - .toSTORM(Configuration.getInstance().getDefaultOverwriteMode()); - private TSizeInBytes fileSize = TSizeInBytes.makeEmpty(); - private TLifeTimeInSeconds desiredFileLifetime; - - /** - * @param user - * @param surl - * @param transferProtocols - * @throws IllegalArgumentException - * @throws IllegalStateException - */ - public AnonymousPrepareToPutInputData(TSURL surl, TURLPrefix transferProtocols) - throws IllegalArgumentException, IllegalStateException { - - super(surl, transferProtocols); - this.desiredFileLifetime = TLifeTimeInSeconds.make(Configuration - .getInstance().getFileLifetimeDefault(), TimeUnit.SECONDS); - - } - - public AnonymousPrepareToPutInputData(TSURL surl, - TURLPrefix transferProtocols, TLifeTimeInSeconds desiredFileLifetime) - throws IllegalArgumentException, IllegalStateException { - - this(surl, transferProtocols); - this.desiredFileLifetime = desiredFileLifetime; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.synchcall.data.datatransfer.PrepareToPutInputData# - * getOverwriteMode() - */ - @Override - public TOverwriteMode getOverwriteMode() { - - return overwriteMode; - } - - @Override - public void setOverwriteMode(TOverwriteMode overwriteMode) { - - this.overwriteMode = overwriteMode; - } - - /* - * (non-Javadoc) - * - * @see - * it.grid.storm.synchcall.data.datatransfer.PrepareToPutInputData#getFileSize - * () - */ - @Override - public TSizeInBytes getFileSize() { - - return fileSize; - } - - @Override - public void setFileSize(TSizeInBytes fileSize) { - - this.fileSize = fileSize; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.synchcall.data.datatransfer.PrepareToPutInputData# - * 
getDesiredFileLifetime() - */ - @Override - public TLifeTimeInSeconds getDesiredFileLifetime() { - - return desiredFileLifetime; - } - - @Override - public void setDesiredFileLifetime(TLifeTimeInSeconds desiredFileLifetime) { - - this.desiredFileLifetime = desiredFileLifetime; - } +public class AnonymousPrepareToPutInputData extends AnonymousFileTransferInputData + implements PrepareToPutInputData { + + private static Configuration c = Configuration.getInstance(); + + private TOverwriteMode overwriteMode; + private TSizeInBytes fileSize; + private TLifeTimeInSeconds desiredFileLifetime; + + /** + * @param user + * @param surl + * @param transferProtocols + * @throws IllegalArgumentException + * @throws IllegalStateException + */ + public AnonymousPrepareToPutInputData(TSURL surl, TURLPrefix transferProtocols) + throws IllegalArgumentException, IllegalStateException { + + this(surl, transferProtocols, + TLifeTimeInSeconds.make(c.getFileLifetimeDefault(), TimeUnit.SECONDS)); + } + + public AnonymousPrepareToPutInputData(TSURL surl, TURLPrefix transferProtocols, + TLifeTimeInSeconds desiredFileLifetime) + throws IllegalArgumentException, IllegalStateException { + + super(surl, transferProtocols); + setDesiredFileLifetime(desiredFileLifetime); + setOverwriteMode(OverwriteModeConverter.toSTORM(c.getDefaultOverwriteMode())); + setFileSize(TSizeInBytes.makeEmpty()); + } + + @Override + public TOverwriteMode getOverwriteMode() { + + return overwriteMode; + } + + @Override + public void setOverwriteMode(TOverwriteMode overwriteMode) { + + this.overwriteMode = overwriteMode; + } + + @Override + public TSizeInBytes getFileSize() { + + return fileSize; + } + + @Override + public void setFileSize(TSizeInBytes fileSize) { + + this.fileSize = fileSize; + } + + @Override + public TLifeTimeInSeconds getDesiredFileLifetime() { + + return desiredFileLifetime; + } + + @Override + public void setDesiredFileLifetime(TLifeTimeInSeconds desiredFileLifetime) { + + this.desiredFileLifetime 
= desiredFileLifetime; + } } diff --git a/src/main/java/it/grid/storm/synchcall/data/directory/MvOutputData.java b/src/main/java/it/grid/storm/synchcall/data/directory/MvOutputData.java index 42cd006d1..07c7f1581 100644 --- a/src/main/java/it/grid/storm/synchcall/data/directory/MvOutputData.java +++ b/src/main/java/it/grid/storm/synchcall/data/directory/MvOutputData.java @@ -25,53 +25,50 @@ */ package it.grid.storm.synchcall.data.directory; -import java.util.Vector; - import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.synchcall.data.OutputData; import it.grid.storm.synchcall.data.exception.InvalidMvOutputAttributeException; public class MvOutputData implements OutputData { - private TReturnStatus returnStatus = null; + private TReturnStatus returnStatus = null; - public MvOutputData() { + public MvOutputData() { - } + } - public MvOutputData(TReturnStatus retStatus) - throws InvalidMvOutputAttributeException { + public MvOutputData(TReturnStatus retStatus) throws InvalidMvOutputAttributeException { - boolean ok = (retStatus == null); - if (!ok) { - throw new InvalidMvOutputAttributeException(retStatus); - } - this.returnStatus = retStatus; - } + boolean ok = (retStatus == null); + if (!ok) { + throw new InvalidMvOutputAttributeException(retStatus); + } + this.returnStatus = retStatus; + } - /** - * Method that return Status. - */ + /** + * Method that return Status. 
+ */ - public TReturnStatus getStatus() { + public TReturnStatus getStatus() { - return returnStatus; - } + return returnStatus; + } - /** - * Set ReturnStatus - * - */ - public void setStatus(TReturnStatus retStat) { + /** + * Set ReturnStatus + * + */ + public void setStatus(TReturnStatus retStat) { - this.returnStatus = retStat; - } + this.returnStatus = retStat; + } - // @Override - public boolean isSuccess() { + // @Override + public boolean isSuccess() { - // TODO Auto-generated method stub - return true; - } + // TODO Auto-generated method stub + return true; + } } diff --git a/src/main/java/it/grid/storm/synchcall/data/discovery/PingOutputData.java b/src/main/java/it/grid/storm/synchcall/data/discovery/PingOutputData.java index 4b26cf09a..937c862e1 100644 --- a/src/main/java/it/grid/storm/synchcall/data/discovery/PingOutputData.java +++ b/src/main/java/it/grid/storm/synchcall/data/discovery/PingOutputData.java @@ -17,15 +17,11 @@ package it.grid.storm.synchcall.data.discovery; -import java.util.Iterator; - import it.grid.storm.srm.types.ArrayOfTExtraInfo; -import it.grid.storm.srm.types.TExtraInfo; import it.grid.storm.synchcall.data.OutputData; /** - * This class is part of the StoRM project. This class represents the Ping - * Output Data + * This class is part of the StoRM project. 
This class represents the Ping Output Data * * Copyright: Copyright (c) 2008 Company: INFN-CNAF and ICTP/EGRID project * @@ -38,73 +34,69 @@ public class PingOutputData implements OutputData { - private String versionInfo = null; - private ArrayOfTExtraInfo extraInfoArray = null; + private String versionInfo = null; + private ArrayOfTExtraInfo extraInfoArray = null; - public PingOutputData() { + public PingOutputData() { - } + } - public PingOutputData(String versionInfo, ArrayOfTExtraInfo otherInfo) { + public PingOutputData(String versionInfo, ArrayOfTExtraInfo otherInfo) { - this.versionInfo = versionInfo; - this.extraInfoArray = otherInfo; - } + this.versionInfo = versionInfo; + this.extraInfoArray = otherInfo; + } - /** - * Set versionInfo. - * - * @param versionInfo - * String - */ - public void setVersionInfo(String versionInfo) { + /** + * Set versionInfo. + * + * @param versionInfo String + */ + public void setVersionInfo(String versionInfo) { - this.versionInfo = versionInfo; - } + this.versionInfo = versionInfo; + } - /** - * Get versionInfo. - * - * @return String - */ - public String getVersionInfo() { + /** + * Get versionInfo. + * + * @return String + */ + public String getVersionInfo() { - return this.versionInfo; - } + return this.versionInfo; + } - /** - * Set extraInfoArray. - * - * @param extraInfoArray - * TExtraInfo - */ - public void setExtraInfoArray(ArrayOfTExtraInfo otherInfo) { + /** + * Set extraInfoArray. + * + * @param extraInfoArray TExtraInfo + */ + public void setExtraInfoArray(ArrayOfTExtraInfo otherInfo) { - this.extraInfoArray = otherInfo; - } + this.extraInfoArray = otherInfo; + } - /** - * Get extraInfoArray. - * - * @return TExtraInfo - */ - public ArrayOfTExtraInfo getExtraInfoArray() { + /** + * Get extraInfoArray. 
+ * + * @return TExtraInfo + */ + public ArrayOfTExtraInfo getExtraInfoArray() { - return this.extraInfoArray; - } + return this.extraInfoArray; + } - // TODO - public boolean isSuccess() { + public boolean isSuccess() { - // TODO Auto-generated method stub - return true; - } + return true; + } - public String toString() { + public String toString() { - String result = versionInfo; - result += this.extraInfoArray.toString(); - return result; + String result = versionInfo; + result += this.extraInfoArray.toString(); + return result; - } + } } diff --git a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortFilesInputDataAttributeException.java b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortFilesInputDataAttributeException.java index fbe667b86..10ff5ad32 100644 --- a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortFilesInputDataAttributeException.java +++ b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortFilesInputDataAttributeException.java @@ -31,15 +31,20 @@ public class InvalidAbortFilesInputDataAttributeException extends Exception { - private boolean nullSurlInfo = true; + /** + * + */ + private static final long serialVersionUID = 1L; - public InvalidAbortFilesInputDataAttributeException(ArrayOfSURLs surlInfo) { + private boolean nullSurlInfo = true; - nullSurlInfo = (surlInfo == null); - } + public InvalidAbortFilesInputDataAttributeException(ArrayOfSURLs surlInfo) { - public String toString() { + nullSurlInfo = (surlInfo == null); + } - return "nullSurlInfo = " + nullSurlInfo; - } + public String toString() { + + return "nullSurlInfo = " + nullSurlInfo; + } } diff --git a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortFilesOutputDataAttributeException.java b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortFilesOutputDataAttributeException.java index b4d5f899b..cc74f9bcc 100644 --- 
a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortFilesOutputDataAttributeException.java +++ b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortFilesOutputDataAttributeException.java @@ -31,17 +31,21 @@ public class InvalidAbortFilesOutputDataAttributeException extends Exception { - private boolean nullSurlStatus = true; + /** + * + */ + private static final long serialVersionUID = 1L; - public InvalidAbortFilesOutputDataAttributeException( - ArrayOfTSURLReturnStatus surlStatus) { + private boolean nullSurlStatus = true; - nullSurlStatus = (surlStatus == null); - } + public InvalidAbortFilesOutputDataAttributeException(ArrayOfTSURLReturnStatus surlStatus) { - public String toString() { + nullSurlStatus = (surlStatus == null); + } - return "nullSurlStatusArray = " + nullSurlStatus; - } + public String toString() { + + return "nullSurlStatusArray = " + nullSurlStatus; + } } diff --git a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortGeneralInputDataAttributeException.java b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortGeneralInputDataAttributeException.java index ef3147372..421650418 100644 --- a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortGeneralInputDataAttributeException.java +++ b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortGeneralInputDataAttributeException.java @@ -31,15 +31,20 @@ public class InvalidAbortGeneralInputDataAttributeException extends Exception { - private boolean nullSurlInfo = true; + /** + * + */ + private static final long serialVersionUID = 1L; - public InvalidAbortGeneralInputDataAttributeException(ArrayOfSURLs surlInfo) { + private boolean nullSurlInfo = true; - nullSurlInfo = (surlInfo == null); - } + public InvalidAbortGeneralInputDataAttributeException(ArrayOfSURLs surlInfo) { - public String toString() { + nullSurlInfo = (surlInfo == null); + } - return "nullSurlInfo = " + nullSurlInfo; - } + public String toString() { + + 
return "nullSurlInfo = " + nullSurlInfo; + } } diff --git a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortGeneralOutputDataAttributeException.java b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortGeneralOutputDataAttributeException.java index 21cff82b3..d55748443 100644 --- a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortGeneralOutputDataAttributeException.java +++ b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidAbortGeneralOutputDataAttributeException.java @@ -31,17 +31,21 @@ public class InvalidAbortGeneralOutputDataAttributeException extends Exception { - private boolean nullSurlStatus = true; + /** + * + */ + private static final long serialVersionUID = 1L; - public InvalidAbortGeneralOutputDataAttributeException( - ArrayOfTSURLReturnStatus surlStatus) { + private boolean nullSurlStatus = true; - nullSurlStatus = (surlStatus == null); - } + public InvalidAbortGeneralOutputDataAttributeException(ArrayOfTSURLReturnStatus surlStatus) { - public String toString() { + nullSurlStatus = (surlStatus == null); + } - return "nullSurlStatusArray = " + nullSurlStatus; - } + public String toString() { + + return "nullSurlStatusArray = " + nullSurlStatus; + } } diff --git a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidMkdirInputAttributeException.java b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidMkdirInputAttributeException.java index f8df1c9e9..2f694d535 100644 --- a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidMkdirInputAttributeException.java +++ b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidMkdirInputAttributeException.java @@ -31,15 +31,20 @@ public class InvalidMkdirInputAttributeException extends Exception { - private boolean nullSurl = true; + /** + * + */ + private static final long serialVersionUID = 1L; - public InvalidMkdirInputAttributeException(TSURL surl) { + private boolean nullSurl = true; - nullSurl = (surl == null); - } + 
public InvalidMkdirInputAttributeException(TSURL surl) { - public String toString() { + nullSurl = (surl == null); + } - return "nullSurl = " + nullSurl; - } + public String toString() { + + return "nullSurl = " + nullSurl; + } } diff --git a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidMvInputAttributeException.java b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidMvInputAttributeException.java index 69a3addb1..48108ce1c 100644 --- a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidMvInputAttributeException.java +++ b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidMvInputAttributeException.java @@ -31,17 +31,22 @@ public class InvalidMvInputAttributeException extends Exception { - private boolean nullFromSurl = true; - private boolean nullToSurl = true; + /** + * + */ + private static final long serialVersionUID = 1L; - public InvalidMvInputAttributeException(TSURL fromSURL, TSURL toSURL) { + private boolean nullFromSurl = true; + private boolean nullToSurl = true; - nullFromSurl = (fromSURL == null); - nullToSurl = (toSURL == null); - } + public InvalidMvInputAttributeException(TSURL fromSURL, TSURL toSURL) { - public String toString() { + nullFromSurl = (fromSURL == null); + nullToSurl = (toSURL == null); + } - return "nullFromSurl = " + nullFromSurl + " , nullToSURL = " + nullToSurl; - } + public String toString() { + + return "nullFromSurl = " + nullFromSurl + " , nullToSURL = " + nullToSurl; + } } diff --git a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidMvOutputAttributeException.java b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidMvOutputAttributeException.java index ee9332e94..3ca0ef87a 100644 --- a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidMvOutputAttributeException.java +++ b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidMvOutputAttributeException.java @@ -21,16 +21,21 @@ public class InvalidMvOutputAttributeException extends Exception { - 
private boolean nullStat = true; + /** + * + */ + private static final long serialVersionUID = 1L; - public InvalidMvOutputAttributeException(TReturnStatus stat) { + private boolean nullStat = true; - nullStat = (stat == null); - } + public InvalidMvOutputAttributeException(TReturnStatus stat) { - public String toString() { + nullStat = (stat == null); + } - return "nullStatus = " + nullStat; - } + public String toString() { + + return "nullStatus = " + nullStat; + } } diff --git a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidPutDoneOutputAttributeException.java b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidPutDoneOutputAttributeException.java index d932f2601..8c52fc213 100644 --- a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidPutDoneOutputAttributeException.java +++ b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidPutDoneOutputAttributeException.java @@ -31,17 +31,21 @@ public class InvalidPutDoneOutputAttributeException extends Exception { - private boolean nullSurlStatus = true; + /** + * + */ + private static final long serialVersionUID = 1L; - public InvalidPutDoneOutputAttributeException( - ArrayOfTSURLReturnStatus surlStatus) { + private boolean nullSurlStatus = true; - nullSurlStatus = (surlStatus == null); - } + public InvalidPutDoneOutputAttributeException(ArrayOfTSURLReturnStatus surlStatus) { - public String toString() { + nullSurlStatus = (surlStatus == null); + } - return "nullSurlStatusArray = " + nullSurlStatus; - } + public String toString() { + + return "nullSurlStatusArray = " + nullSurlStatus; + } } diff --git a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidRmInputAttributeException.java b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidRmInputAttributeException.java index b449082c8..cd5e25396 100644 --- a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidRmInputAttributeException.java +++ 
b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidRmInputAttributeException.java @@ -29,19 +29,22 @@ import it.grid.storm.srm.types.ArrayOfSURLs; -import java.util.Vector; - public class InvalidRmInputAttributeException extends Exception { - private boolean nullSurlInfo = true; + /** + * + */ + private static final long serialVersionUID = 1L; + + private boolean nullSurlInfo = true; - public InvalidRmInputAttributeException(ArrayOfSURLs surl) { + public InvalidRmInputAttributeException(ArrayOfSURLs surl) { - nullSurlInfo = (surl == null); - } + nullSurlInfo = (surl == null); + } - public String toString() { + public String toString() { - return "nullSurlInfo = " + nullSurlInfo; - } + return "nullSurlInfo = " + nullSurlInfo; + } } diff --git a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidRmOutputAttributeException.java b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidRmOutputAttributeException.java index a985b09c7..25c438bbb 100644 --- a/src/main/java/it/grid/storm/synchcall/data/exception/InvalidRmOutputAttributeException.java +++ b/src/main/java/it/grid/storm/synchcall/data/exception/InvalidRmOutputAttributeException.java @@ -29,19 +29,22 @@ import it.grid.storm.srm.types.ArrayOfTSURLReturnStatus; -import java.util.Vector; - public class InvalidRmOutputAttributeException extends Exception { - private boolean nullSurlStatus = true; + /** + * + */ + private static final long serialVersionUID = 1L; + + private boolean nullSurlStatus = true; - public InvalidRmOutputAttributeException(ArrayOfTSURLReturnStatus surlStatus) { + public InvalidRmOutputAttributeException(ArrayOfTSURLReturnStatus surlStatus) { - nullSurlStatus = (surlStatus == null); - } + nullSurlStatus = (surlStatus == null); + } - public String toString() { + public String toString() { - return "nullSurlStatusArray = " + nullSurlStatus; - } + return "nullSurlStatusArray = " + nullSurlStatus; + } } diff --git 
a/src/main/java/it/grid/storm/synchcall/data/space/GetSpaceMetaDataOutputData.java b/src/main/java/it/grid/storm/synchcall/data/space/GetSpaceMetaDataOutputData.java index 811d84570..992cbfebb 100644 --- a/src/main/java/it/grid/storm/synchcall/data/space/GetSpaceMetaDataOutputData.java +++ b/src/main/java/it/grid/storm/synchcall/data/space/GetSpaceMetaDataOutputData.java @@ -36,13 +36,12 @@ import it.grid.storm.synchcall.data.exception.InvalidGetSpaceMetaDataOutputAttributeException; /** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project + * This class is part of the StoRM project. Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project * - * This class represents the SpaceReservationData associated with the SRM - * request, that is it contains info about: UserID, spaceType, SizeDesired, - * SizeGuaranteed,ecc. Number of files progressing, Number of files finished, - * and whether the request is currently suspended. + * This class represents the SpaceReservationData associated with the SRM request, that is it + * contains info about: UserID, spaceType, SizeDesired, SizeGuaranteed,ecc. Number of files + * progressing, Number of files finished, and whether the request is currently suspended. 
* * @author lucamag * @date May 29, 2008 @@ -51,67 +50,70 @@ public class GetSpaceMetaDataOutputData implements OutputData, Serializable { - private TReturnStatus status = null; - private ArrayOfTMetaDataSpace metaDataArray = null; + /** + * + */ + private static final long serialVersionUID = 1L; + + private TReturnStatus status = null; + private ArrayOfTMetaDataSpace metaDataArray = null; - public GetSpaceMetaDataOutputData() { + public GetSpaceMetaDataOutputData() { - } + } - public GetSpaceMetaDataOutputData(TReturnStatus status, - ArrayOfTMetaDataSpace metaDataArray) - throws InvalidGetSpaceMetaDataOutputAttributeException { + public GetSpaceMetaDataOutputData(TReturnStatus status, ArrayOfTMetaDataSpace metaDataArray) + throws InvalidGetSpaceMetaDataOutputAttributeException { - boolean ok = status != null && metaDataArray != null; + boolean ok = status != null && metaDataArray != null; - if (!ok) { - throw new InvalidGetSpaceMetaDataOutputAttributeException(status, - metaDataArray); - } + if (!ok) { + throw new InvalidGetSpaceMetaDataOutputAttributeException(status, metaDataArray); + } - this.status = status; - this.metaDataArray = metaDataArray; + this.status = status; + this.metaDataArray = metaDataArray; - } + } - /** - * Method that returns GridUser specify in SRM request. - */ + /** + * Method that returns GridUser specify in SRM request. + */ - public TReturnStatus getStatus() { + public TReturnStatus getStatus() { - return status; - } + return status; + } - /** - * - * - */ - public void setStatus(TReturnStatus status) { + /** + * + * + */ + public void setStatus(TReturnStatus status) { - this.status = status; + this.status = status; - } + } - /** - * Method return metaData. i n queue. - */ + /** + * Method return metaData. i n queue. 
+ */ - public ArrayOfTMetaDataSpace getMetaDataSpaceArray() { + public ArrayOfTMetaDataSpace getMetaDataSpaceArray() { - return metaDataArray; - } + return metaDataArray; + } - public void setMetaDataSpaceArray(ArrayOfTMetaDataSpace metaDataArray) { + public void setMetaDataSpaceArray(ArrayOfTMetaDataSpace metaDataArray) { - this.metaDataArray = metaDataArray; - } + this.metaDataArray = metaDataArray; + } - // @Override - public boolean isSuccess() { + // @Override + public boolean isSuccess() { - // TODO Auto-generated method stub - return false; - } + // TODO Auto-generated method stub + return false; + } } diff --git a/src/main/java/it/grid/storm/tape/recalltable/TapeRecallCatalog.java b/src/main/java/it/grid/storm/tape/recalltable/TapeRecallCatalog.java index 596026f5c..a323820ea 100644 --- a/src/main/java/it/grid/storm/tape/recalltable/TapeRecallCatalog.java +++ b/src/main/java/it/grid/storm/tape/recalltable/TapeRecallCatalog.java @@ -24,13 +24,13 @@ import com.google.common.collect.Lists; import it.grid.storm.asynch.Suspendedable; -import it.grid.storm.catalogs.BoLPersistentChunkData; -import it.grid.storm.catalogs.PersistentChunkData; -import it.grid.storm.catalogs.PtGData; -import it.grid.storm.catalogs.RequestData; -import it.grid.storm.persistence.PersistenceDirector; import it.grid.storm.persistence.dao.TapeRecallDAO; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.impl.mysql.TapeRecallDAOMySql; +import it.grid.storm.persistence.model.BoLPersistentChunkData; +import it.grid.storm.persistence.model.PersistentChunkData; +import it.grid.storm.persistence.model.PtGData; +import it.grid.storm.persistence.model.RequestData; import it.grid.storm.persistence.model.TapeRecallTO; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; @@ -42,6 +42,7 @@ import java.util.Date; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.UUID; import 
java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; @@ -51,10 +52,19 @@ public class TapeRecallCatalog { private static final Logger log = LoggerFactory.getLogger(TapeRecallCatalog.class); - private final TapeRecallDAO tapeRecallDAO; + private static TapeRecallCatalog instance; private static Map> recallBuckets = new ConcurrentHashMap<>(); + public static synchronized TapeRecallCatalog getInstance() { + if (instance == null) { + instance = new TapeRecallCatalog(); + } + return instance; + } + + private TapeRecallDAO tapeRecallDAO; + /** * Default constructor * @@ -62,7 +72,7 @@ public class TapeRecallCatalog { */ public TapeRecallCatalog() { - tapeRecallDAO = PersistenceDirector.getDAOFactory().getTapeRecallDAO(); + tapeRecallDAO = TapeRecallDAOMySql.getInstance(); } /** @@ -98,25 +108,6 @@ public int getNumberTaskInProgress() throws DataAccessException { return result; } - /** - * Determines how many task rows have an in-progress state given a certain VO - * - * @param voName @return @throws DataAccessException - */ - public int getNumberTaskInProgress(String voName) throws DataAccessException { - - int result = -1; - try { - result = tapeRecallDAO.getNumberInProgress(voName); - } catch (DataAccessException e) { - log.error( - "Unable to retrieve the number of tasks currently in progress. DataAccessException: {}", - e.getMessage(), e); - throw e; - } - return result; - } - /** * Determines how many task rows have a queued state * @@ -135,24 +126,6 @@ public int getNumberTaskQueued() throws DataAccessException { return result; } - /** - * Determines how many task rows have a queued state given a certain VO - * - * @return @throws DataAccessException - */ - public int getNumberTaskQueued(String voName) throws DataAccessException { - - int result = -1; - try { - result = tapeRecallDAO.getNumberQueued(voName); - } catch (DataAccessException e) { - log.error("Unable to retrieve the number of tasks queued. 
DataAccessException: {}", - e.getMessage(), e); - throw e; - } - return result; - } - /** * * Determines how many task rows have a queued state and their deferred start time is elapsed @@ -173,30 +146,10 @@ public int getReadyForTakeOver() throws DataAccessException { return result; } - /** - * Determines how many task rows given a certain VO have a queued state and their deferred start - * time is elapsed - * - * @return @throws DataAccessException - */ - public int getReadyForTakeOver(String voName) throws DataAccessException { - - int result = -1; - try { - result = tapeRecallDAO.getReadyForTakeOver(voName); - } catch (DataAccessException e) { - log.error( - "Unable to retrieve the number of tasks ready for the take-over. DataAccessException: {}", - e.getMessage(), e); - throw e; - } - return result; - } - /** * @param taskId @param requestToken @return @throws DataAccessException */ - public TapeRecallTO getTask(UUID taskId, String requestToken) throws DataAccessException { + public Optional getTask(UUID taskId, String requestToken) throws DataAccessException { return tapeRecallDAO.getTask(taskId, requestToken); } @@ -279,48 +232,6 @@ public List getAllInProgressTasks(int numberOfTaks) { return taskList; } - /** - * @return - */ - public TapeRecallTO takeoverTask() { - - TapeRecallTO task = null; - try { - task = tapeRecallDAO.takeoverTask(); - } catch (DataAccessException e) { - log.error("Unable to takeove a task.", e); - } - return task; - } - - /** - * @param voName @return - */ - public TapeRecallTO takeoverTask(String voName) { - - TapeRecallTO task = null; - try { - task = tapeRecallDAO.takeoverTask(voName); - } catch (DataAccessException e) { - log.error("Unable to takeover a task for vo {}", voName, e); - } - return task; - } - - /** - * @param numberOfTaks @param voName @return - */ - public List takeoverTasks(int numberOfTaks, String voName) { - - List taskList = Lists.newArrayList(); - try { - 
taskList.addAll(tapeRecallDAO.takeoverTasksWithDoubles(numberOfTaks, voName)); - } catch (DataAccessException e) { - log.error("Unable to takeover {} tasks for vo {}", numberOfTaks, voName, e); - } - return taskList; - } - /** * Method used by PtGChunk and BoLChunk to request the recall of a file * diff --git a/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusLogic.java b/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusLogic.java index 54523b253..0b1d0ad32 100644 --- a/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusLogic.java +++ b/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusLogic.java @@ -23,6 +23,7 @@ import it.grid.storm.tape.recalltable.TapeRecallException; import java.util.Date; +import java.util.Optional; import java.util.UUID; import javax.ws.rs.core.Response; @@ -36,76 +37,88 @@ */ public class PutTapeRecallStatusLogic { - private static final Logger log = LoggerFactory - .getLogger(PutTapeRecallStatusLogic.class); - - /** - * @param requestToken - * @param stori - * @return - * @throws TapeRecallException - */ - public static Response serveRequest(String requestToken, StoRI stori) - throws TapeRecallException { - - LocalFile localFile = stori.getLocalFile(); - boolean fileOnDisk; - - try { - fileOnDisk = localFile.isOnDisk(); - } catch (FSException e) { - log.error("Unable to test file {} presence on disk. 
FSException {}" , localFile.getAbsolutePath() , e.getMessage() , e); - throw new TapeRecallException("Error checking file existence"); - } - - if (!fileOnDisk) { - return Response.ok(false, TEXT_PLAIN_TYPE).status(200).build(); - } - - if (!stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { - // tape not enable for StoRI filesystem, nothing to do - return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); - } - - String pfn = localFile.getAbsolutePath(); - UUID taskId = TapeRecallTO.buildTaskIdFromFileName(pfn); - TapeRecallCatalog rtCat = new TapeRecallCatalog(); - boolean exists = false; - try { - exists = rtCat.existsTask(taskId, requestToken); - } catch (DataAccessException e) { - log.error("Error checking existence of a recall task for taskId={} requestToken={}. DataAccessException: {}" , taskId , requestToken , e.getMessage() , e); - throw new TapeRecallException("Error reading from tape recall table"); - } - if (!exists) { - // no recall tasks for this file, nothing to do - return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); - } - - TapeRecallTO task; - try { - task = rtCat.getTask(taskId, requestToken); - } catch (DataAccessException e) { - log.error("Unable to update task recall status because unable to retrieve groupTaskId for token {}. DataAccessException: {}", requestToken , e.getMessage(),e); - throw new TapeRecallException("Error reading from tape recall table"); - } - - if (TapeRecallStatus.getRecallTaskStatus(task.getStatusId()).equals(SUCCESS)) { - // status already updated, nothing to do - return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); - } - - UUID groupTaskId = task.getGroupTaskId(); - boolean updated; - try { - updated = rtCat.changeGroupTaskStatus(groupTaskId, SUCCESS, new Date()); - } catch (DataAccessException e) { - log.error("Unable to update task recall status for token {} with groupTaskId={}. 
DataAccessException : {}", requestToken , groupTaskId , e.getMessage() , e); - throw new TapeRecallException("Error updating tape recall table"); - } - if (updated) { - log.info("Task status set to SUCCESS. groupTaskId={} requestToken={} pfn={}" , groupTaskId , requestToken , pfn); - } - return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); - } + private static final Logger log = LoggerFactory.getLogger(PutTapeRecallStatusLogic.class); + + /** + * @param requestToken + * @param stori + * @return + * @throws TapeRecallException + */ + public static Response serveRequest(String requestToken, StoRI stori) throws TapeRecallException { + + LocalFile localFile = stori.getLocalFile(); + boolean fileOnDisk; + + try { + fileOnDisk = localFile.isOnDisk(); + } catch (FSException e) { + log.error("Unable to test file {} presence on disk. FSException {}", + localFile.getAbsolutePath(), e.getMessage(), e); + throw new TapeRecallException("Error checking file existence"); + } + + if (!fileOnDisk) { + return Response.ok(false, TEXT_PLAIN_TYPE).status(200).build(); + } + + if (!stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { + // tape not enable for StoRI filesystem, nothing to do + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } + + String pfn = localFile.getAbsolutePath(); + UUID taskId = TapeRecallTO.buildTaskIdFromFileName(pfn); + TapeRecallCatalog rtCat = new TapeRecallCatalog(); + boolean exists = false; + try { + exists = rtCat.existsTask(taskId, requestToken); + } catch (DataAccessException e) { + log.error( + "Error checking existence of a recall task for taskId={} requestToken={}. 
DataAccessException: {}", + taskId, requestToken, e.getMessage(), e); + throw new TapeRecallException("Error reading from tape recall table"); + } + if (!exists) { + // no recall tasks for this file, nothing to do + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } + + TapeRecallTO task; + try { + Optional tTask = rtCat.getTask(taskId, requestToken); + if (tTask.isPresent()) { + task = tTask.get(); + } else { + // no recall tasks for this file, nothing to do + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } + } catch (DataAccessException e) { + log.error( + "Unable to update task recall status because unable to retrieve groupTaskId for token {}. DataAccessException: {}", + requestToken, e.getMessage(), e); + throw new TapeRecallException("Error reading from tape recall table"); + } + + if (TapeRecallStatus.getRecallTaskStatus(task.getStatusId()).equals(SUCCESS)) { + // status already updated, nothing to do + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } + + UUID groupTaskId = task.getGroupTaskId(); + boolean updated; + try { + updated = rtCat.changeGroupTaskStatus(groupTaskId, SUCCESS, new Date()); + } catch (DataAccessException e) { + log.error( + "Unable to update task recall status for token {} with groupTaskId={}. DataAccessException : {}", + requestToken, groupTaskId, e.getMessage(), e); + throw new TapeRecallException("Error updating tape recall table"); + } + if (updated) { + log.info("Task status set to SUCCESS. 
groupTaskId={} requestToken={} pfn={}", groupTaskId, + requestToken, pfn); + } + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } } diff --git a/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusValidator.java b/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusValidator.java index 547f1cbfc..d9e29edcc 100644 --- a/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusValidator.java +++ b/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusValidator.java @@ -17,13 +17,6 @@ package it.grid.storm.tape.recalltable.model; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.StoRI; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.util.SURLValidator; -import it.grid.storm.util.TokenValidator; - import java.util.StringTokenizer; import javax.ws.rs.core.Response; @@ -31,6 +24,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.StoRI; +import it.grid.storm.srm.types.InvalidTSURLAttributesException; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.util.SURLValidator; +import it.grid.storm.util.TokenValidator; + public class PutTapeRecallStatusValidator implements RequestValidator { private static final Logger log = LoggerFactory @@ -142,7 +142,7 @@ private boolean validateSurl(String surlString) { return false; } try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl); + stori = Namespace.getInstance().resolveStoRIbySURL(surl); } catch (Exception e) { log.warn("Unable to build a stori for surl {} UnapprochableSurlException: {}" , surl , e.getMessage(),e); return false; diff --git a/src/main/java/it/grid/storm/tape/recalltable/persistence/PropertiesDB.java b/src/main/java/it/grid/storm/tape/recalltable/persistence/PropertiesDB.java deleted file mode 100644 index 
e0896a77a..000000000 --- a/src/main/java/it/grid/storm/tape/recalltable/persistence/PropertiesDB.java +++ /dev/null @@ -1,236 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * - */ -package it.grid.storm.tape.recalltable.persistence; - -import it.grid.storm.config.Configuration; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.TapeRecallTO; -import it.grid.storm.srm.types.TRequestToken; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Properties; -import java.util.UUID; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * @author zappi - * - */ -public class PropertiesDB { - - private static final Logger log = LoggerFactory.getLogger(PropertiesDB.class); - private static Configuration config = Configuration.getInstance(); - private final String dataFileName = "recall-table.txt"; - private final String propertiesDBName; - - public PropertiesDB() { - - String configurationDir = PropertiesDB.config.configurationDir(); - char sep = File.separatorChar; - propertiesDBName = configurationDir + sep + "etc" + sep + "db" + 
sep - + dataFileName; - log.debug("Properties RecallTable-DB = {}" , propertiesDBName); - } - - public PropertiesDB(boolean test) { - - String configurationDir; - if (test) { - configurationDir = System.getProperty("user.dir"); - } else { - configurationDir = PropertiesDB.config.configurationDir(); - } - char sep = File.separatorChar; - propertiesDBName = configurationDir + sep + "etc" + sep + "db" + sep - + dataFileName; - // log.debug("Properties RecallTable-DB = " + propertiesDBName); - File tasksDBfile = new File(propertiesDBName); - boolean success = false; - try { - success = tasksDBfile.createNewFile(); - } catch (IOException e) { - log.error("Error while trying to check : {}" , propertiesDBName,e); - } - if (success) { - log.debug("TaskDB = '{}' exists ? {}" , propertiesDBName , success); - } - } - - // *************** PERSISTENCE METHODS **************** - - /** - * - * @param task - * @throws FileNotFoundException - * @throws IOException - * @throws DataAccessException - */ - public void addRecallTask(TapeRecallTO task) throws FileNotFoundException, - IOException, DataAccessException { - - Properties properties = new Properties(); - properties.load(new FileInputStream(propertiesDBName)); - - // Retrieve the Request-Token (unique-key) - TRequestToken taskToken = task.getRequestToken(); - if (taskToken == null) { - log.error("You are trying to store a Task without a task-id."); - throw new DataAccessException( - "You are trying to store a Task without a task-id."); - } - // Build the String related to Task-id - String taskStr = task.toString(); - // Insert the new property entry - properties.setProperty(taskToken.getValue(), taskStr); - // Store the properties into disk - properties.store(new FileOutputStream(propertiesDBName), null); - } - - public void setRecallTask(List listTasks) - throws FileNotFoundException, IOException, DataAccessException { - - Properties properties = new Properties(); - properties.load(new FileInputStream(propertiesDBName)); - - 
TRequestToken taskToken = null; - String taskStr = null; - for (TapeRecallTO TapeRecallTO : listTasks) { - // Retrieve the Task-id (unique-key) - taskToken = TapeRecallTO.getRequestToken(); - if (taskToken == null) { - log.error("You are trying to store a Task without a RequestToken."); - throw new DataAccessException( - "You are trying to store a Task without a Request-Token."); - } - // Build the String related to Task-id - taskStr = TapeRecallTO.toString(); - // Insert the new property entry - properties.setProperty(taskToken.getValue(), taskStr); - taskToken = null; - } - // Store the properties into disk - properties.store(new FileOutputStream(propertiesDBName), null); - } - - // public List getRecallTask(UUID taskId) throws - // FileNotFoundException, IOException, DataAccessException { - // ArrayList result = new ArrayList(); - // Properties properties = new Properties(); - // properties.load(new FileInputStream(propertiesDBName)); - // - // for (Object values : properties.values()) { - // String v = (String)values; - // TapeRecallTO task = TapeRecallBuilder.build(v); - // if (task.getTaskId().equals(taskId)) { - // result.add(task); - // } - // } - // if (result.isEmpty()) { - // log.error("Unable to retrieve the task with ID = " + taskId); - // throw new DataAccessException("Unable to find the task with ID = " + - // taskId); - // } - // return result; - // } - - public void updateRecallTask(TapeRecallTO task) throws FileNotFoundException, - IOException, DataAccessException { - - Properties properties = new Properties(); - properties.load(new FileInputStream(propertiesDBName)); - - UUID taskId = task.getTaskId(); - - // Check if the Task exists within the Properties DB - boolean taskExist = properties.containsKey(taskId.toString()); - if (!(taskExist)) { - log.error("Unable to find the task with ID = {}" , taskId); - throw new DataAccessException("Unable to find the task with ID = " - + taskId); - } else { - // Build the String related to Task-id - String 
taskStr = task.toString(); - // Insert the new property entry - properties.setProperty(taskId.toString(), taskStr); - log.debug("Removed tasks '{}'" , taskId); - } - - // Store the properties into disk - properties.store(new FileOutputStream(propertiesDBName), null); - } - - public void deleteRecallTask(UUID taskId) throws FileNotFoundException, - IOException, DataAccessException { - - Properties properties = new Properties(); - properties.load(new FileInputStream(propertiesDBName)); - - // Retrieve the Task from taskId - String task = properties.getProperty(taskId.toString()); - if (task == null) { - log.error("Unable to find the task with ID = {}" , taskId); - throw new DataAccessException("Unable to find the task with ID = " - + taskId); - } else { - properties.remove(taskId); - log.debug("Removed tasks '{}'" , taskId); - } - - // Store the properties into disk - properties.store(new FileOutputStream(propertiesDBName), null); - } - - // public LinkedHashMap getAll() throws - // FileNotFoundException, IOException, DataAccessException { - // - // LinkedHashMap tasksDBmem = new - // LinkedHashMap(); - // ArrayList tasksList = new ArrayList(); - // Properties properties = new Properties(); - // properties.load(new FileInputStream(propertiesDBName)); - // Collection values = properties.values(); - // for (Object element : values) { - // String line = (String) element; - // TapeRecallTO task = TapeRecallBuilder.build(line); - // tasksList.add(task); - // } - // TapeRecallTO[] tasksArray = tasksList.toArray(new - // TapeRecallTO[tasksList.size()]); - // Arrays.sort(tasksArray); - // // Create the ordered LinkedHashMap - // for (TapeRecallTO element : tasksArray) { - // tasksDBmem.put(element.getRequestToken(), element); - // } - // return tasksDBmem; - // } - -} diff --git a/src/main/java/it/grid/storm/tape/recalltable/resources/TaskResource.java b/src/main/java/it/grid/storm/tape/recalltable/resources/TaskResource.java index 7a2196f63..a085729e5 100644 --- 
a/src/main/java/it/grid/storm/tape/recalltable/resources/TaskResource.java +++ b/src/main/java/it/grid/storm/tape/recalltable/resources/TaskResource.java @@ -24,27 +24,6 @@ import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; import static javax.ws.rs.core.Response.Status.NOT_FOUND; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import it.grid.storm.config.Configuration; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; -import it.grid.storm.namespace.StoRI; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.TapeRecallTO; -import it.grid.storm.rest.metadata.service.ResourceNotFoundException; -import it.grid.storm.rest.metadata.service.ResourceService; -import it.grid.storm.tape.recalltable.TapeRecallCatalog; -import it.grid.storm.tape.recalltable.TapeRecallException; -import it.grid.storm.tape.recalltable.model.PutTapeRecallStatusLogic; -import it.grid.storm.tape.recalltable.model.PutTapeRecallStatusValidator; -import it.grid.storm.tape.recalltable.model.TapeRecallStatus; -import it.grid.storm.tape.recalltable.model.TaskInsertRequestValidator; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -66,10 +45,27 @@ import javax.ws.rs.core.GenericEntity; import javax.ws.rs.core.Response; -/** - * @author Riccardo Zappi - * @author Enrico Vianello - */ +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; + +import it.grid.storm.config.Configuration; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.StoRI; +import 
it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.TapeRecallTO; +import it.grid.storm.rest.metadata.service.ResourceNotFoundException; +import it.grid.storm.rest.metadata.service.ResourceService; +import it.grid.storm.tape.recalltable.TapeRecallCatalog; +import it.grid.storm.tape.recalltable.TapeRecallException; +import it.grid.storm.tape.recalltable.model.PutTapeRecallStatusLogic; +import it.grid.storm.tape.recalltable.model.PutTapeRecallStatusValidator; +import it.grid.storm.tape.recalltable.model.TapeRecallStatus; +import it.grid.storm.tape.recalltable.model.TaskInsertRequestValidator; + @Path("/recalltable/task") public class TaskResource { @@ -84,7 +80,7 @@ public class TaskResource { public TaskResource() throws NamespaceException { - NamespaceInterface ns = NamespaceDirector.getNamespace(); + Namespace ns = Namespace.getInstance(); recallCatalog = new TapeRecallCatalog(); service = new ResourceService(ns.getAllDefinedVFS(), ns.getAllDefinedMappingRules()); } @@ -254,8 +250,13 @@ public void putNewTaskStatusOrRetryValue(@PathParam("groupTaskId") UUID groupTas try { - recallCatalog.changeGroupTaskStatus(groupTaskId, - TapeRecallStatus.getRecallTaskStatus(intValue), new Date()); + TapeRecallStatus updatedStatus = TapeRecallStatus.getRecallTaskStatus(intValue); + recallCatalog.changeGroupTaskStatus(groupTaskId, updatedStatus, new Date()); + // Update all PtG or BoL related + if (updatedStatus.isFinalStatus()) { + + } + } catch (DataAccessException e) { diff --git a/src/main/java/it/grid/storm/util/VirtualFSHelper.java b/src/main/java/it/grid/storm/util/VirtualFSHelper.java index aec536a48..989020e57 100644 --- a/src/main/java/it/grid/storm/util/VirtualFSHelper.java +++ b/src/main/java/it/grid/storm/util/VirtualFSHelper.java @@ -5,9 +5,9 @@ import com.google.common.collect.Lists; import it.grid.storm.namespace.CapabilityInterface; -import it.grid.storm.namespace.NamespaceDirector; -import 
it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.model.Quota; +import it.grid.storm.namespace.model.VirtualFS; public class VirtualFSHelper { @@ -15,7 +15,7 @@ private VirtualFSHelper() { // empty constructor } - public static final boolean isGPFSQuotaEnabledForVFS(VirtualFSInterface vfs) { + public static final boolean isGPFSQuotaEnabledForVFS(VirtualFS vfs) { boolean result = false; @@ -36,12 +36,12 @@ public static final boolean isGPFSQuotaEnabledForVFS(VirtualFSInterface vfs) { return result; } - public static List getGPFSQuotaEnabledFilesystems() { + public static List getGPFSQuotaEnabledFilesystems() { - List fss = Lists.newArrayList(); - List allVFS = NamespaceDirector.getNamespace().getAllDefinedVFS(); + List fss = Lists.newArrayList(); + List allVFS = Namespace.getInstance().getAllDefinedVFS(); - for (VirtualFSInterface vfs : allVFS) { + for (VirtualFS vfs : allVFS) { if (isGPFSQuotaEnabledForVFS(vfs)) fss.add(vfs); } diff --git a/src/main/java/it/grid/storm/xmlrpc/XMLRPCExecutor.java b/src/main/java/it/grid/storm/xmlrpc/XMLRPCExecutor.java index 1db025466..6930595d5 100644 --- a/src/main/java/it/grid/storm/xmlrpc/XMLRPCExecutor.java +++ b/src/main/java/it/grid/storm/xmlrpc/XMLRPCExecutor.java @@ -46,135 +46,127 @@ public class XMLRPCExecutor { - private static ArrayList bookKeepers = HealthDirector - .getHealthMonitor().getBookKeepers(); - - /** - * Logger - */ - private static final Logger log = LoggerFactory - .getLogger(XMLRPCExecutor.class); - - /** - * @param type - * @param inputParam - * @return - */ - - public Map execute(OperationType type, Map inputParam) - throws StoRMXmlRpcException { - - long startTime = System.currentTimeMillis(); - long duration = System.nanoTime(); - log.debug("Executing a '{}' call" , type.toString()); - log.debug(" Structure size : {}" , inputParam.size()); - Converter converter = ConveterFactory.getConverter(type); - SynchcallDispatcher dispatcher = 
SynchcallDispatcherFactory.getDispatcher(); - - log.debug("Converting input data with Converter {}", converter.getClass().getName()); - InputData inputData = converter.convertToInputData(inputParam); - - log.debug("Dispatching request using SynchcallDispatcher {}" - , dispatcher.getClass().getName()); - OutputData outputData; - try { - outputData = dispatcher.processRequest(type, inputData); - } catch (IllegalArgumentException e) { - log - .error("Unable to process the request. Error from the SynchcallDispatcher. IllegalArgumentException: {}" - , e.getMessage(),e); - throw new StoRMXmlRpcException( - "Unable to process the request. IllegalArgumentException: " - + e.getMessage()); - } catch (CommandException e) { - log - .error("Unable to execute the request. Error from the SynchcallDispatcher. CommandException: {}" - , e.getMessage(),e); - throw new StoRMXmlRpcException( - "Unable to process the request. CommandException: " + e.getMessage()); - } - Map outputParam = converter.convertFromOutputData(outputData); - duration = System.nanoTime() - duration; - - logExecution(convertOperationType(type), - DataHelper.getRequestor(inputData), startTime, - TimeUnit.NANOSECONDS.toMillis(duration), - outputData.isSuccess()); - - return outputParam; - } - - /** - * Method used to book the execution of SYNCH operation - */ - private void logExecution(it.grid.storm.health.OperationType opType, - String dn, long startTime, long duration, boolean successResult) { - - LogEvent event = new LogEvent(opType, dn, startTime, duration, - successResult); - if (!(bookKeepers.isEmpty())) { - log.debug("Found # {} bookeepers." , bookKeepers.size()); - for (int i = 0; i < bookKeepers.size(); i++) { - bookKeepers.get(i).addLogEvent(event); - } - } - } - - /** - * TOREMOVE! this is a temporary code since two different class of - * OperationTYpe are defined. This is to convert the two kind of operation - * type, from the onw used here, enum based, to the one requested by the - * hearthbeat. 
- */ - private it.grid.storm.health.OperationType convertOperationType( - OperationType type) { - - switch (type) { - case PTG: - return it.grid.storm.health.OperationType.PTG; - case SPTG: - return it.grid.storm.health.OperationType.SPTG; - case PTP: - return it.grid.storm.health.OperationType.PTP; - case SPTP: - return it.grid.storm.health.OperationType.SPTP; - case COPY: - return it.grid.storm.health.OperationType.COPY; - case BOL: - return it.grid.storm.health.OperationType.BOL; - case AF: - return it.grid.storm.health.OperationType.AF; - case AR: - return it.grid.storm.health.OperationType.AR; - case EFL: - return it.grid.storm.health.OperationType.EFL; - case GSM: - return it.grid.storm.health.OperationType.GSM; - case GST: - return it.grid.storm.health.OperationType.GST; - case LS: - return it.grid.storm.health.OperationType.LS; - case MKD: - return it.grid.storm.health.OperationType.MKD; - case MV: - return it.grid.storm.health.OperationType.MV; - case PNG: - return it.grid.storm.health.OperationType.PNG; - case PD: - return it.grid.storm.health.OperationType.PD; - case RF: - return it.grid.storm.health.OperationType.RF; - case RESSP: - return it.grid.storm.health.OperationType.RS; - case RELSP: - return it.grid.storm.health.OperationType.RSP; - case RM: - return it.grid.storm.health.OperationType.RM; - case RMD: - return it.grid.storm.health.OperationType.RMD; - default: - return it.grid.storm.health.OperationType.UNDEF; - } - } + private static ArrayList bookKeepers = + HealthDirector.getHealthMonitor().getBookKeepers(); + + /** + * Logger + */ + private static final Logger log = LoggerFactory.getLogger(XMLRPCExecutor.class); + + /** + * @param type + * @param inputParam + * @return + */ + + public Map execute(OperationType type, Map inputParam) + throws StoRMXmlRpcException { + + long startTime = System.currentTimeMillis(); + long duration = System.nanoTime(); + log.debug("Executing a '{}' call", type.toString()); + log.debug(" Structure size : {}", 
inputParam.size()); + Converter converter = ConveterFactory.getConverter(type); + SynchcallDispatcher dispatcher = SynchcallDispatcherFactory.getDispatcher(); + + log.debug("Converting input data with Converter {}", converter.getClass().getName()); + InputData inputData = converter.convertToInputData(inputParam); + + log.debug("Dispatching request using SynchcallDispatcher {}", dispatcher.getClass().getName()); + OutputData outputData; + try { + outputData = dispatcher.processRequest(type, inputData); + } catch (IllegalArgumentException e) { + log.error( + "Unable to process the request. Error from the SynchcallDispatcher. IllegalArgumentException: {}", + e.getMessage(), e); + throw new StoRMXmlRpcException( + "Unable to process the request. IllegalArgumentException: " + e.getMessage()); + } catch (CommandException e) { + log.error( + "Unable to execute the request. Error from the SynchcallDispatcher. CommandException: {}", + e.getMessage(), e); + throw new StoRMXmlRpcException( + "Unable to process the request. CommandException: " + e.getMessage()); + } + Map outputParam = converter.convertFromOutputData(outputData); + duration = System.nanoTime() - duration; + + logExecution(convertOperationType(type), DataHelper.getRequestor(inputData), startTime, + TimeUnit.NANOSECONDS.toMillis(duration), outputData.isSuccess()); + + return outputParam; + } + + /** + * Method used to book the execution of SYNCH operation + */ + private void logExecution(it.grid.storm.health.OperationType opType, String dn, long startTime, + long duration, boolean successResult) { + + LogEvent event = new LogEvent(opType, dn, startTime, duration, successResult); + if (!(bookKeepers.isEmpty())) { + log.debug("Found # {} bookeepers.", bookKeepers.size()); + for (int i = 0; i < bookKeepers.size(); i++) { + bookKeepers.get(i).addLogEvent(event); + } + } + } + + /** + * TOREMOVE! this is a temporary code since two different class of OperationTYpe are defined. 
This + * is to convert the two kind of operation type, from the onw used here, enum based, to the one + * requested by the hearthbeat. + */ + private it.grid.storm.health.OperationType convertOperationType(OperationType type) { + + switch (type) { + case PTG: + return it.grid.storm.health.OperationType.PTG; + case SPTG: + return it.grid.storm.health.OperationType.SPTG; + case PTP: + return it.grid.storm.health.OperationType.PTP; + case SPTP: + return it.grid.storm.health.OperationType.SPTP; + case COPY: + return it.grid.storm.health.OperationType.COPY; + case BOL: + return it.grid.storm.health.OperationType.BOL; + case AF: + return it.grid.storm.health.OperationType.AF; + case AR: + return it.grid.storm.health.OperationType.AR; + case EFL: + return it.grid.storm.health.OperationType.EFL; + case GSM: + return it.grid.storm.health.OperationType.GSM; + case GST: + return it.grid.storm.health.OperationType.GST; + case LS: + return it.grid.storm.health.OperationType.LS; + case MKD: + return it.grid.storm.health.OperationType.MKD; + case MV: + return it.grid.storm.health.OperationType.MV; + case PNG: + return it.grid.storm.health.OperationType.PNG; + case PD: + return it.grid.storm.health.OperationType.PD; + case RF: + return it.grid.storm.health.OperationType.RF; + case RESSP: + return it.grid.storm.health.OperationType.RS; + case RELSP: + return it.grid.storm.health.OperationType.RSP; + case RM: + return it.grid.storm.health.OperationType.RM; + case RMD: + return it.grid.storm.health.OperationType.RMD; + default: + return it.grid.storm.health.OperationType.UNDEF; + } + } } diff --git a/src/main/java/it/grid/storm/xmlrpc/XMLRPCHttpServer.java b/src/main/java/it/grid/storm/xmlrpc/XMLRPCHttpServer.java index 0f44946ab..4bf3e53a9 100644 --- a/src/main/java/it/grid/storm/xmlrpc/XMLRPCHttpServer.java +++ b/src/main/java/it/grid/storm/xmlrpc/XMLRPCHttpServer.java @@ -66,9 +66,6 @@ public final class XMLRPCHttpServer { */ private boolean running = false; - public static final 
int DEFAULT_MAX_THREAD_NUM = 256; - public static final int DEFAULT_MAX_QUEUE_SIZE = 1000; - /** * @param port * @param maxThreadNum @@ -82,18 +79,10 @@ public XMLRPCHttpServer(int port, int maxThreadNum, int maxQueueSize) private void configureThreadPool(Server s, int maxThreadNum, int maxQueueSize) { - int threadNumber = maxThreadNum; - - if (threadNumber <= 0) { - threadNumber = DEFAULT_MAX_THREAD_NUM; - } + int threadNumber = maxThreadNum; int queueSize = maxQueueSize; - if (queueSize <= 0) { - queueSize = DEFAULT_MAX_QUEUE_SIZE; - } - NamedInstrumentedThreadPool tp = new NamedInstrumentedThreadPool("xmlrpc", METRIC_REGISTRY.getRegistry()); @@ -120,13 +109,13 @@ private void configureHandler(Server server) throws StoRMXmlRpcException { ServletContextHandler servletContextHandler = new ServletContextHandler(); servletContextHandler.addServlet(new ServletHolder(servlet), "/"); - Boolean isTokenEnabled = Configuration.getInstance().getXmlRpcTokenEnabled(); + Boolean isTokenEnabled = Configuration.getInstance().isSecurityEnabled(); if (isTokenEnabled) { LOG.info("Enabling security filter for XML-RPC requests"); - String token = Configuration.getInstance().getXmlRpcToken(); + String token = Configuration.getInstance().getSecurityToken(); if (token == null || token.isEmpty()) { diff --git a/src/main/java/it/grid/storm/xmlrpc/XMLRPCMethods.java b/src/main/java/it/grid/storm/xmlrpc/XMLRPCMethods.java index d83592bd0..264bc49c8 100644 --- a/src/main/java/it/grid/storm/xmlrpc/XMLRPCMethods.java +++ b/src/main/java/it/grid/storm/xmlrpc/XMLRPCMethods.java @@ -22,165 +22,174 @@ package it.grid.storm.xmlrpc; import it.grid.storm.common.OperationType; -import java.util.HashMap; import java.util.Map; public class XMLRPCMethods { - private final XMLRPCExecutor executor = new XMLRPCExecutor(); + private final XMLRPCExecutor executor = new XMLRPCExecutor(); - public XMLRPCMethods() { + public XMLRPCMethods() { - }; + }; - public Map ping(Map inputParam) throws 
StoRMXmlRpcException { + public Map ping(Map inputParam) throws StoRMXmlRpcException { - return executor.execute(OperationType.PNG, inputParam); - } + return executor.execute(OperationType.PNG, inputParam); + } - public Map putDone(Map inputParam) throws StoRMXmlRpcException { + public Map putDone(Map inputParam) throws StoRMXmlRpcException { - return executor.execute(OperationType.PD, inputParam); - } + return executor.execute(OperationType.PD, inputParam); + } - public Map releaseFiles(Map inputParam) throws StoRMXmlRpcException { + public Map releaseFiles(Map inputParam) + throws StoRMXmlRpcException { - return executor.execute(OperationType.RF, inputParam); - } + return executor.execute(OperationType.RF, inputParam); + } - public Map extendFileLifeTime(Map inputParam) throws StoRMXmlRpcException { + public Map extendFileLifeTime(Map inputParam) + throws StoRMXmlRpcException { - return executor.execute(OperationType.EFL, inputParam); - } + return executor.execute(OperationType.EFL, inputParam); + } - public Map abortRequest(Map inputParam) throws StoRMXmlRpcException { + public Map abortRequest(Map inputParam) + throws StoRMXmlRpcException { - return executor.execute(OperationType.AR, inputParam); - } + return executor.execute(OperationType.AR, inputParam); + } - public Map abortFiles(HashMap inputParam) throws StoRMXmlRpcException { + public Map abortFiles(Map inputParam) + throws StoRMXmlRpcException { - return executor.execute(OperationType.AF, inputParam); - } + return executor.execute(OperationType.AF, inputParam); + } - public Map reserveSpace(Map inputParam) throws StoRMXmlRpcException { + public Map reserveSpace(Map inputParam) + throws StoRMXmlRpcException { - return executor.execute(OperationType.RESSP, inputParam); - } + return executor.execute(OperationType.RESSP, inputParam); + } - /** - * GetSpaceMetaData - */ - public Map getSpaceMetaData(HashMap inputParam) throws StoRMXmlRpcException { + /** + * GetSpaceMetaData + */ + public Map 
getSpaceMetaData(Map inputParam) + throws StoRMXmlRpcException { - return executor.execute(OperationType.GSM, inputParam); - } + return executor.execute(OperationType.GSM, inputParam); + } - /** - * GetSpaceTokens - * - * @param inputParam - * @return - */ - public Map getSpaceTokens(Map inputParam) throws StoRMXmlRpcException { + /** + * GetSpaceTokens + * + * @param inputParam + * @return + */ + public Map getSpaceTokens(Map inputParam) + throws StoRMXmlRpcException { - return executor.execute(OperationType.GST, inputParam); - } + return executor.execute(OperationType.GST, inputParam); + } - /** - * ReleaseSpace - */ + /** + * ReleaseSpace + */ - public Map ReleaseSpace(Map inputParam) throws StoRMXmlRpcException { + public Map ReleaseSpace(Map inputParam) + throws StoRMXmlRpcException { - return executor.execute(OperationType.RELSP, inputParam); - } + return executor.execute(OperationType.RELSP, inputParam); + } - /** - * SrmLs request. This method catch an SrmLs request passed by StoRM FrontEnd - * trough xmlrpc communication. The Hastable is the default Java type used to - * represent structure passed by xmlrpc. - * - * @param Hastable - * output parameter structure returned. - * @param inputParameter - * input parameter structure received from xmlrpc call. - */ + /** + * SrmLs request. This method catch an SrmLs request passed by StoRM Frontend trough XMLRPC + * communication. The HashTable is the default Java type used to represent structure passed by + * XMLRPC. + * + * @param Hastable output parameter structure returned. + * @param inputParameter input parameter structure received from xmlrpc call. + */ - public Map ls(Map inputParam) throws StoRMXmlRpcException { + public Map ls(Map inputParam) throws StoRMXmlRpcException { - return executor.execute(OperationType.LS, inputParam); - } + return executor.execute(OperationType.LS, inputParam); + } - /** - * SrmMkdir functionality. - */ + /** + * SrmMkdir functionality. 
+ */ - public Map mkdir(Map inputParam) throws StoRMXmlRpcException { + public Map mkdir(Map inputParam) throws StoRMXmlRpcException { - return executor.execute(OperationType.MKD, inputParam); - } + return executor.execute(OperationType.MKD, inputParam); + } - /** - * SrmRmdir functionality. - * - * @param inputParam - * @return - */ - public Map rmdir(Map inputParam) throws StoRMXmlRpcException { + /** + * SrmRmdir functionality. + * + * @param inputParam + * @return + */ + public Map rmdir(Map inputParam) throws StoRMXmlRpcException { - return executor.execute(OperationType.RMD, inputParam); - } + return executor.execute(OperationType.RMD, inputParam); + } - /** - * SrmRm functionality. - * - * @param inputParam - * @return - */ - public Map rm(Map inputParam) throws StoRMXmlRpcException { + /** + * SrmRm functionality. + * + * @param inputParam + * @return + */ + public Map rm(Map inputParam) throws StoRMXmlRpcException { - return executor.execute(OperationType.RM, inputParam); - } + return executor.execute(OperationType.RM, inputParam); + } - /** - * SrmMv functionality. - */ + /** + * SrmMv functionality. + */ + + public Map mv(Map inputParam) throws StoRMXmlRpcException { - public Map mv(Map inputParam) throws StoRMXmlRpcException { + return executor.execute(OperationType.MV, inputParam); + } + + /** + * SrmPrepareToPut functionality. + */ + public Map prepareToPut(Map inputParam) + throws StoRMXmlRpcException { + + return executor.execute(OperationType.PTP, inputParam); + } + + /** + * SrmPrepareToPutStatus functionality. + */ + public Map prepareToPutStatus(Map inputParam) + throws StoRMXmlRpcException { + + return executor.execute(OperationType.SPTP, inputParam); + } + + /** + * SrmPrepareToGet functionality. + */ + public Map prepareToGet(Map inputParam) + throws StoRMXmlRpcException { + + return executor.execute(OperationType.PTG, inputParam); + } + + /** + * SrmPrepareToGetStatus functionality. 
+ */ + public Map prepareToGetStatus(Map inputParam) + throws StoRMXmlRpcException { - return executor.execute(OperationType.MV, inputParam); - } - - /** - * SrmPrepareToPut functionality. - */ - public Map prepareToPut(Map inputParam) throws StoRMXmlRpcException { - - return executor.execute(OperationType.PTP, inputParam); - } - - /** - * SrmPrepareToPutStatus functionality. - */ - public Map prepareToPutStatus(Map inputParam) throws StoRMXmlRpcException { - - return executor.execute(OperationType.SPTP, inputParam); - } - - /** - * SrmPrepareToGet functionality. - */ - public Map prepareToGet(Map inputParam) throws StoRMXmlRpcException { - - return executor.execute(OperationType.PTG, inputParam); - } - - /** - * SrmPrepareToGetStatus functionality. - */ - public Map prepareToGetStatus(Map inputParam) throws StoRMXmlRpcException { - - return executor.execute(OperationType.SPTG, inputParam); - } + return executor.execute(OperationType.SPTG, inputParam); + } } diff --git a/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/AbortFilesConverter.java b/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/AbortFilesConverter.java index 3e727ad77..11ed5c0f3 100644 --- a/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/AbortFilesConverter.java +++ b/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/AbortFilesConverter.java @@ -27,6 +27,13 @@ */ package it.grid.storm.xmlrpc.converter.datatransfer; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; + import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; import it.grid.storm.srm.types.ArrayOfSURLs; @@ -44,86 +51,73 @@ import it.grid.storm.synchcall.data.datatransfer.IdentityAbortFilesInputData; import it.grid.storm.xmlrpc.converter.Converter; -import java.util.HashMap; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public class 
AbortFilesConverter implements Converter { - private static final Logger log = LoggerFactory - .getLogger(AbortFilesConverter.class); - - public AbortFilesConverter() { - - } - - /** - * This method returns a AbortFilesInputData created from the input Hashtable - * structure of a xmlrpc srmAbortFiles() v2.2 call. - * - * @param inputParam - * Hashtable containing the input data - * @return AbortFilesInputData - */ - public InputData convertToInputData(Map inputParam) { - - GridUserInterface guser = GridUserManager.decode(inputParam); - - TRequestToken requestToken; - try { - requestToken = TRequestToken.decode(inputParam, - TRequestToken.PNAME_REQUESTOKEN); - log.debug("requestToken={}" , requestToken.toString()); - } catch (InvalidTRequestTokenAttributesException e) { - requestToken = null; - log.debug("requestToken=NULL",e); - } - - ArrayOfSURLs arrayOfSURLs; - try { - arrayOfSURLs = ArrayOfSURLs.decode(inputParam, ArrayOfSURLs.ARRAY_OF_SURLS); - } catch (InvalidArrayOfSURLsAttributeException e) { - log.debug("Empty surlArray!"); - arrayOfSURLs = null; - } - - AbortFilesInputData inputData; - if (guser != null) { - inputData = new IdentityAbortFilesInputData(guser, requestToken, - arrayOfSURLs); - } else { - inputData = new AnonymousAbortFilesInputData(requestToken, arrayOfSURLs); - } - return inputData; - } - - public Map convertFromOutputData(OutputData data) { - - log.debug("AbortFilesOutputData - Creation of XMLRPC Output Structure!"); - - Map outputParam = new HashMap(); - AbortFilesOutputData outputData = AbortFilesOutputData - .make((AbortGeneralOutputData) data); - - // (1) returnStatus - TReturnStatus returnStatus = outputData.getReturnStatus(); - if (returnStatus != null) { - returnStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); - } - - // (2) arrayOfFileStatuses - ArrayOfTSURLReturnStatus arrayOfFileStatuses = outputData - .getArrayOfFileStatuses(); - if (arrayOfFileStatuses != null) { - arrayOfFileStatuses.encode(outputParam, - 
ArrayOfTSURLReturnStatus.PNAME_ARRAYOFFILESTATUSES); - } - - log.debug("AbortFilesConverter - Sending: {}" , outputParam.toString()); - - // Return global structure. - return outputParam; - } + private static final Logger log = LoggerFactory.getLogger(AbortFilesConverter.class); + + public AbortFilesConverter() { + + } + + /** + * This method returns a AbortFilesInputData created from the input Hashtable structure of a + * xmlrpc srmAbortFiles() v2.2 call. + * + * @param inputParam Hashtable containing the input data + * @return AbortFilesInputData + */ + public InputData convertToInputData(Map inputParam) { + + GridUserInterface guser = GridUserManager.decode(inputParam); + + TRequestToken requestToken; + try { + requestToken = TRequestToken.decode(inputParam, TRequestToken.PNAME_REQUESTOKEN); + log.debug("requestToken={}", requestToken.toString()); + } catch (InvalidTRequestTokenAttributesException e) { + requestToken = null; + log.debug("requestToken=NULL", e); + } + + ArrayOfSURLs arrayOfSURLs; + try { + arrayOfSURLs = ArrayOfSURLs.decode(inputParam, ArrayOfSURLs.ARRAY_OF_SURLS); + } catch (InvalidArrayOfSURLsAttributeException e) { + log.debug("Empty surlArray!"); + arrayOfSURLs = null; + } + + AbortFilesInputData inputData; + if (guser != null) { + inputData = new IdentityAbortFilesInputData(guser, requestToken, arrayOfSURLs); + } else { + inputData = new AnonymousAbortFilesInputData(requestToken, arrayOfSURLs); + } + return inputData; + } + + public Map convertFromOutputData(OutputData data) { + + log.debug("AbortFilesOutputData - Creation of XMLRPC Output Structure!"); + + Map outputParam = Maps.newHashMap(); + AbortFilesOutputData outputData = AbortFilesOutputData.make((AbortGeneralOutputData) data); + + // (1) returnStatus + TReturnStatus returnStatus = outputData.getReturnStatus(); + if (returnStatus != null) { + returnStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); + } + + // (2) arrayOfFileStatuses + ArrayOfTSURLReturnStatus 
arrayOfFileStatuses = outputData.getArrayOfFileStatuses(); + if (arrayOfFileStatuses != null) { + arrayOfFileStatuses.encode(outputParam, ArrayOfTSURLReturnStatus.PNAME_ARRAYOFFILESTATUSES); + } + + log.debug("AbortFilesConverter - Sending: {}", outputParam.toString()); + + // Return global structure. + return outputParam; + } } diff --git a/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/AbortRequestConverter.java b/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/AbortRequestConverter.java index 4fea0384b..616c534d8 100644 --- a/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/AbortRequestConverter.java +++ b/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/AbortRequestConverter.java @@ -27,6 +27,13 @@ */ package it.grid.storm.xmlrpc.converter.datatransfer; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; + import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; @@ -41,68 +48,58 @@ import it.grid.storm.synchcall.data.datatransfer.IdentityAbortRequestInputData; import it.grid.storm.xmlrpc.converter.Converter; -import java.util.HashMap; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public class AbortRequestConverter implements Converter { - private static final Logger log = LoggerFactory - .getLogger(AbortRequestConverter.class); - - public AbortRequestConverter() { - - } - - /** - * This method returns a AbortRequest data created from the input Hashtable - * structure of a xmlrpc srmAbortRequest() v2.2 call. 
- * - * @param inputParam - * Hashtable containing the input data - * @return AbortRequestInputData - */ - public InputData convertToInputData(Map inputParam) { - - GridUserInterface guser = GridUserManager.decode(inputParam); - - TRequestToken requestToken; - try { - requestToken = TRequestToken.decode(inputParam, - TRequestToken.PNAME_REQUESTOKEN); - log.debug("requestToken={}" , requestToken.toString()); - } catch (InvalidTRequestTokenAttributesException e) { - requestToken = null; - log.debug("requestToken=NULL",e); - } - AbortInputData inputData; - if (guser != null) { - inputData = new IdentityAbortRequestInputData(guser, requestToken); - } else { - inputData = new AnonymousAbortRequestInputData(requestToken); - } - return inputData; - } - - public Map convertFromOutputData(OutputData data) { - - log.debug("AbortRequestOutputData - Creation of XMLRPC Output Structure!"); - - Map outputParam = new HashMap(); - AbortRequestOutputData outputData = AbortRequestOutputData - .make((AbortGeneralOutputData) data); - - // (1) returnStatus - TReturnStatus returnStatus = outputData.getReturnStatus(); - - if (returnStatus != null) { - returnStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); - } - log.debug("AbortRequestConverter - Sending: {}" , outputParam.toString()); - - // Return global structure. - return outputParam; - } + private static final Logger log = LoggerFactory.getLogger(AbortRequestConverter.class); + + public AbortRequestConverter() { + + } + + /** + * This method returns a AbortRequest data created from the input Hashtable structure of a xmlrpc + * srmAbortRequest() v2.2 call. 
+ * + * @param inputParam Hashtable containing the input data + * @return AbortRequestInputData + */ + public InputData convertToInputData(Map inputParam) { + + GridUserInterface guser = GridUserManager.decode(inputParam); + + TRequestToken requestToken; + try { + requestToken = TRequestToken.decode(inputParam, TRequestToken.PNAME_REQUESTOKEN); + log.debug("requestToken={}", requestToken.toString()); + } catch (InvalidTRequestTokenAttributesException e) { + requestToken = null; + log.debug("requestToken=NULL", e); + } + AbortInputData inputData; + if (guser != null) { + inputData = new IdentityAbortRequestInputData(guser, requestToken); + } else { + inputData = new AnonymousAbortRequestInputData(requestToken); + } + return inputData; + } + + public Map convertFromOutputData(OutputData data) { + + log.debug("AbortRequestOutputData - Creation of XMLRPC Output Structure!"); + + Map outputParam = Maps.newHashMap(); + AbortRequestOutputData outputData = AbortRequestOutputData.make((AbortGeneralOutputData) data); + + // (1) returnStatus + TReturnStatus returnStatus = outputData.getReturnStatus(); + + if (returnStatus != null) { + returnStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); + } + log.debug("AbortRequestConverter - Sending: {}", outputParam.toString()); + + // Return global structure. 
+ return outputParam; + } } diff --git a/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/ExtendFileLifeTimeConverter.java b/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/ExtendFileLifeTimeConverter.java index f07395790..ee7ca0f81 100644 --- a/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/ExtendFileLifeTimeConverter.java +++ b/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/ExtendFileLifeTimeConverter.java @@ -17,6 +17,13 @@ package it.grid.storm.xmlrpc.converter.datatransfer; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; + import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; import it.grid.storm.srm.types.ArrayOfSURLs; @@ -30,24 +37,18 @@ import it.grid.storm.synchcall.data.OutputData; import it.grid.storm.synchcall.data.datatransfer.AnonymousExtendFileLifeTimeInputData; import it.grid.storm.synchcall.data.datatransfer.ExtendFileLifeTimeInputData; -import it.grid.storm.synchcall.data.datatransfer.IdentityExtendFileLifeTimeInputData; import it.grid.storm.synchcall.data.datatransfer.ExtendFileLifeTimeOutputData; +import it.grid.storm.synchcall.data.datatransfer.IdentityExtendFileLifeTimeInputData; import it.grid.storm.xmlrpc.converter.Converter; -import java.util.Hashtable; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * * This class is part of the StoRM project. Copyright (c) 2008 INFN-CNAF. *

* - * This class represents the Type Converter for ExtendFileLifeTime function. - * This class receives input datas from xmlrpc call and converts these datas - * into a StoRM Type that can be used to invoke the ExtendFileLifeTimeManager. + * This class represents the Type Converter for ExtendFileLifeTime function. This class receives + * input datas from xmlrpc call and converts these datas into a StoRM Type that can be used to + * invoke the ExtendFileLifeTimeManager. * * Authors: * @@ -60,88 +61,80 @@ public class ExtendFileLifeTimeConverter implements Converter { - private static final Logger log = LoggerFactory - .getLogger(ExtendFileLifeTimeConverter.class); - - public ExtendFileLifeTimeConverter() { - - } - - /** - * This method returns a ExtendFileLifeTimeInputData created from the input - * Hashtable structure of a xmlrpc srmExtendFileLifeTime() v2.2 call. - * - * @param inputParam - * Hashtable containing the input data - * @return ExtendFileLifeTimeInputData - */ - public InputData convertToInputData(Map inputParam) { - - GridUserInterface guser = GridUserManager.decode(inputParam); - - String authID = (String) inputParam.get("authorizationID"); - - TRequestToken requestToken; - try { - requestToken = TRequestToken.decode(inputParam, - TRequestToken.PNAME_REQUESTOKEN); - log.debug("requestToken={}" , requestToken.toString()); - } catch (InvalidTRequestTokenAttributesException e) { - requestToken = null; - log.error("requestToken=NULL",e); - } - - ArrayOfSURLs arrayOfSURLs; - try { - arrayOfSURLs = ArrayOfSURLs.decode(inputParam, ArrayOfSURLs.ARRAY_OF_SURLS); - } catch (InvalidArrayOfSURLsAttributeException e) { - log.error("Empty surlArray!",e); - arrayOfSURLs = null; - } - - TLifeTimeInSeconds newFileLifetime = TLifeTimeInSeconds.decode(inputParam, - TLifeTimeInSeconds.PNAME_FILELIFETIME); - - TLifeTimeInSeconds newPinLifetime = TLifeTimeInSeconds.decode(inputParam, - TLifeTimeInSeconds.PNAME_PINLIFETIME); - - ExtendFileLifeTimeInputData inputData; - if 
(guser != null) { - inputData = new IdentityExtendFileLifeTimeInputData(guser, requestToken, - arrayOfSURLs, newFileLifetime, newPinLifetime); - } else { - inputData = new AnonymousExtendFileLifeTimeInputData(requestToken, - arrayOfSURLs, newFileLifetime, newPinLifetime); - } - return inputData; - } - - public Hashtable convertFromOutputData(OutputData data) { - - log - .debug("ExtendFileLifeTimeOutputData - Creation of XMLRPC Output Structure!"); - - Hashtable outputParam = new Hashtable(); - ExtendFileLifeTimeOutputData outputData = (ExtendFileLifeTimeOutputData) data; - - // (1) returnStatus - TReturnStatus returnStatus = outputData.getReturnStatus(); - if (returnStatus != null) { - returnStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); - } - - // (2) arrayOfFileStatuses - ArrayOfTSURLLifetimeReturnStatus arrayOfFileStatuses = outputData - .getArrayOfFileStatuses(); - if (arrayOfFileStatuses != null) { - arrayOfFileStatuses.encode(outputParam, - ArrayOfTSURLLifetimeReturnStatus.PNAME_ARRAYOFFILESTATUSES); - } - - log.debug("ExtendFileLifeTimeConverter - Sending: {}" - , outputParam.toString()); - - // Return global structure. - return outputParam; - } + private static final Logger log = LoggerFactory.getLogger(ExtendFileLifeTimeConverter.class); + + public ExtendFileLifeTimeConverter() { + + } + + /** + * This method returns a ExtendFileLifeTimeInputData created from the input Hashtable structure of + * a xmlrpc srmExtendFileLifeTime() v2.2 call. 
+ * + * @param inputParam Hashtable containing the input data + * @return ExtendFileLifeTimeInputData + */ + public InputData convertToInputData(Map inputParam) { + + GridUserInterface guser = GridUserManager.decode(inputParam); + + TRequestToken requestToken; + try { + requestToken = TRequestToken.decode(inputParam, TRequestToken.PNAME_REQUESTOKEN); + log.debug("requestToken={}", requestToken.toString()); + } catch (InvalidTRequestTokenAttributesException e) { + requestToken = null; + log.error("requestToken=NULL", e); + } + + ArrayOfSURLs arrayOfSURLs; + try { + arrayOfSURLs = ArrayOfSURLs.decode(inputParam, ArrayOfSURLs.ARRAY_OF_SURLS); + } catch (InvalidArrayOfSURLsAttributeException e) { + log.error("Empty surlArray!", e); + arrayOfSURLs = null; + } + + TLifeTimeInSeconds newFileLifetime = + TLifeTimeInSeconds.decode(inputParam, TLifeTimeInSeconds.PNAME_FILELIFETIME); + + TLifeTimeInSeconds newPinLifetime = + TLifeTimeInSeconds.decode(inputParam, TLifeTimeInSeconds.PNAME_PINLIFETIME); + + ExtendFileLifeTimeInputData inputData; + if (guser != null) { + inputData = new IdentityExtendFileLifeTimeInputData(guser, requestToken, arrayOfSURLs, + newFileLifetime, newPinLifetime); + } else { + inputData = new AnonymousExtendFileLifeTimeInputData(requestToken, arrayOfSURLs, + newFileLifetime, newPinLifetime); + } + return inputData; + } + + public Map convertFromOutputData(OutputData data) { + + log.debug("ExtendFileLifeTimeOutputData - Creation of XMLRPC Output Structure!"); + + Map outputParam = Maps.newHashMap(); + ExtendFileLifeTimeOutputData outputData = (ExtendFileLifeTimeOutputData) data; + + // (1) returnStatus + TReturnStatus returnStatus = outputData.getReturnStatus(); + if (returnStatus != null) { + returnStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); + } + + // (2) arrayOfFileStatuses + ArrayOfTSURLLifetimeReturnStatus arrayOfFileStatuses = outputData.getArrayOfFileStatuses(); + if (arrayOfFileStatuses != null) { + 
arrayOfFileStatuses.encode(outputParam, + ArrayOfTSURLLifetimeReturnStatus.PNAME_ARRAYOFFILESTATUSES); + } + + log.debug("ExtendFileLifeTimeConverter - Sending: {}", outputParam.toString()); + + // Return global structure. + return outputParam; + } } diff --git a/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/ManageFileTransferRequestConverter.java b/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/ManageFileTransferRequestConverter.java index 3ab5135c6..fdd714520 100644 --- a/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/ManageFileTransferRequestConverter.java +++ b/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/ManageFileTransferRequestConverter.java @@ -37,77 +37,72 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class ManageFileTransferRequestConverter extends - ManageFileTransferConverter implements Converter { +public class ManageFileTransferRequestConverter extends ManageFileTransferConverter + implements Converter { - static final Logger log = LoggerFactory - .getLogger(ManageFileTransferRequestConverter.class); + static final Logger log = LoggerFactory.getLogger(ManageFileTransferRequestConverter.class); - /** - * This method returns a ReleaseFilesInputData created from the input - * Hashtable structure of an xmlrpc ReleaseFiles v2.2 call. - * - * @param inputParam - * Hashtable containing the input data - * @return ReleaseFilesInputData - */ - public InputData convertToInputData(Map inputParam) { + /** + * This method returns a ReleaseFilesInputData created from the input Hashtable structure of an + * xmlrpc ReleaseFiles v2.2 call. 
+ * + * @param inputParam Hashtable containing the input data + * @return ReleaseFilesInputData + */ + public InputData convertToInputData(Map inputParam) { - GridUserInterface guser = GridUserManager.decode(inputParam); + GridUserInterface guser = GridUserManager.decode(inputParam); - /* (2) TRequestToken requestToken */ - TRequestToken requestToken = null; - try { - requestToken = TRequestToken.decode(inputParam, - TRequestToken.PNAME_REQUESTOKEN); - log.debug("requestToken={}" , requestToken.toString()); - } catch (InvalidTRequestTokenAttributesException e) { - log - .debug("No request token provided by user. InvalidTRequestTokenAttributesException: {}" - , e.getMessage(),e); - } + /* (2) TRequestToken requestToken */ + TRequestToken requestToken = null; + try { + requestToken = TRequestToken.decode(inputParam, TRequestToken.PNAME_REQUESTOKEN); + log.debug("requestToken={}", requestToken.toString()); + } catch (InvalidTRequestTokenAttributesException e) { + log.debug("No request token provided by user. 
InvalidTRequestTokenAttributesException: {}", + e.getMessage(), e); + } - /* (3) anyURI[] arrayOfSURLs */ - ArrayOfSURLs arrayOfSURLs; - try { - arrayOfSURLs = ArrayOfSURLs.decode(inputParam, ArrayOfSURLs.ARRAY_OF_SURLS); - } catch (InvalidArrayOfSURLsAttributeException e) { - log.debug("Empty surlArray!",e); - arrayOfSURLs = null; - } + /* (3) anyURI[] arrayOfSURLs */ + ArrayOfSURLs arrayOfSURLs; + try { + arrayOfSURLs = ArrayOfSURLs.decode(inputParam, ArrayOfSURLs.ARRAY_OF_SURLS); + } catch (InvalidArrayOfSURLsAttributeException e) { + log.debug("Empty surlArray!", e); + arrayOfSURLs = null; + } - InputData inputData; - if (guser != null) { - if (requestToken != null) { - if (arrayOfSURLs != null && arrayOfSURLs.size() > 0) { - inputData = new IdentityManageFileTransferRequestFilesInputData( - guser, requestToken, arrayOfSURLs); - } else { - inputData = new IdentityReleaseRequestInputData(guser, requestToken); - } - } else { - inputData = new IdentityManageFileTransferFilesInputData(guser, - arrayOfSURLs); - } - } else { - if (requestToken != null) { - if (arrayOfSURLs != null && arrayOfSURLs.size() > 0) { - inputData = new AnonymousManageFileTransferRequestFilesInputData( - requestToken, arrayOfSURLs); - } else { - inputData = new AnonymousReleaseRequestInputData(requestToken); - } - } else { - inputData = new AnonymousManageFileTransferFilesInputData(arrayOfSURLs); - } - } - return inputData; - } + InputData inputData; + if (guser != null) { + if (requestToken != null) { + if (arrayOfSURLs != null && arrayOfSURLs.size() > 0) { + inputData = new IdentityManageFileTransferRequestFilesInputData(guser, requestToken, + arrayOfSURLs); + } else { + inputData = new IdentityReleaseRequestInputData(guser, requestToken); + } + } else { + inputData = new IdentityManageFileTransferFilesInputData(guser, arrayOfSURLs); + } + } else { + if (requestToken != null) { + if (arrayOfSURLs != null && arrayOfSURLs.size() > 0) { + inputData = + new 
AnonymousManageFileTransferRequestFilesInputData(requestToken, arrayOfSURLs); + } else { + inputData = new AnonymousReleaseRequestInputData(requestToken); + } + } else { + inputData = new AnonymousManageFileTransferFilesInputData(arrayOfSURLs); + } + } + return inputData; + } - @Override - protected Logger getLogger() { + @Override + protected Logger getLogger() { - return log; - } + return log; + } } diff --git a/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/PrepareToPutRequestConverter.java b/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/PrepareToPutRequestConverter.java index fc80595b8..c6b372b99 100644 --- a/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/PrepareToPutRequestConverter.java +++ b/src/main/java/it/grid/storm/xmlrpc/converter/datatransfer/PrepareToPutRequestConverter.java @@ -21,7 +21,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.config.model.v2.OverwriteMode; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.converter.OverwriteModeConverter; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TOverwriteMode; import it.grid.storm.srm.types.TSURL; @@ -87,26 +89,17 @@ public InputData convertToInputData(Map inputParam) inputData.setFileSize(fileSize); } - String overwriteModeString = (String) inputParam - .get(OVERWRITE_MODE_PARAMETER_NAME); - if (overwriteModeString != null) { - TOverwriteMode overwriteMode; - try { - overwriteMode = TOverwriteMode.getTOverwriteMode(overwriteModeString); - } catch (IllegalArgumentException e) { - log.error("Unable to build TOverwriteMode from '{}'. 
IllegalArgumentException: {}" - , overwriteModeString - , e.getMessage() - , e); - throw new StoRMXmlRpcException("Unable to build PrepareToPutInputData"); - } - if (!overwriteMode.equals(TOverwriteMode.EMPTY)) { - inputData.setOverwriteMode(overwriteMode); - } else { - log - .warn("Unable to use the received '{}', interpreted as an empty value" , OVERWRITE_MODE_PARAMETER_NAME); - } - } + String overwriteModeString = (String) inputParam.get(OVERWRITE_MODE_PARAMETER_NAME); + if (overwriteModeString != null) { + TOverwriteMode overwriteMode = + OverwriteModeConverter.toSTORM(OverwriteMode.valueOf(overwriteModeString)); + if (!overwriteMode.equals(TOverwriteMode.EMPTY)) { + inputData.setOverwriteMode(overwriteMode); + } else { + log.warn("Unable to use the received '{} = {}', interpreted as an empty value", + OVERWRITE_MODE_PARAMETER_NAME, overwriteModeString); + } + } log.debug("PrepareToPutInputData Created!"); return inputData; } diff --git a/src/main/java/it/grid/storm/xmlrpc/converter/directory/LsConverter.java b/src/main/java/it/grid/storm/xmlrpc/converter/directory/LsConverter.java index 07765f444..3bd7bcded 100644 --- a/src/main/java/it/grid/storm/xmlrpc/converter/directory/LsConverter.java +++ b/src/main/java/it/grid/storm/xmlrpc/converter/directory/LsConverter.java @@ -17,6 +17,13 @@ package it.grid.storm.xmlrpc.converter.directory; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; + import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; import it.grid.storm.srm.types.ArrayOfSURLs; @@ -34,20 +41,13 @@ import it.grid.storm.xmlrpc.converter.Converter; import it.grid.storm.xmlrpc.converter.ParameterDisplayHelper; -import java.util.Hashtable; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * * This class is part of the StoRM project. Copyright (c) 2008 INFN-CNAF. *

* - * This class represents the Type Converter for LS function . This class have - * get an input data from xmlrpc call anc convert it into a StoRM Type that can - * be used to invoke the LSManager + * This class represents the Type Converter for LS function . This class have get an input data from + * xmlrpc call anc convert it into a StoRM Type that can be used to invoke the LSManager * * * Authors: @@ -60,106 +60,106 @@ public class LsConverter implements Converter { - /** - * Logger - */ - private static final Logger log = LoggerFactory.getLogger(LsConverter.class); - - public LsConverter() { - - }; - - /** - * This method return a LSInputData created from input Hashtable structure of - * an xmlrpc spaceReservation v2.1 call. SpaceResData can be used to invoke LS - * method of Directory Functions Manager - */ - public InputData convertToInputData(Map inputParam) { - - log.debug("SrmLs: LSConverter :Call received :Creation of SpaceResData = {}" - , inputParam.size()); - log.debug("SrmLs: LSConverter: Input Structure toString: {}" - , ParameterDisplayHelper.display(inputParam)); - - // Member name definition for inputParam struct , from SRM V2.2 - String member_fullDL = new String("fullDetailedList"); - String member_allLR = new String("allLevelRecursive"); - String member_numOL = new String("numOfLevels"); - String member_offset = new String("offset"); - String member_count = new String("count"); - - /* Creation of VomsGridUser */ - GridUserInterface guser = GridUserManager.decode(inputParam); - - /* (2) anyURI[] arrayOfSURLs */ - ArrayOfSURLs surlArray = null; - try { - surlArray = ArrayOfSURLs.decode(inputParam, ArrayOfSURLs.ARRAY_OF_SURLS); - } catch (InvalidArrayOfSURLsAttributeException e2) { - log.debug("SrmLs: Empty surlArray found!",e2); - surlArray = null; - } - - TFileStorageType fileStorageType = TFileStorageType.decode(inputParam, - TFileStorageType.PNAME_FILESTORAGETYPE); - log.debug("fileType: {}" , fileStorageType); - - /* (5) fullDetailedList */ 
- Boolean fullDL = (Boolean) inputParam.get(member_fullDL); - log.debug("fullDetailedList: {}" , fullDL); - - /* (6) allLevelRecursive */ - Boolean allLR = (Boolean) inputParam.get(member_allLR); - log.debug("allLevelRecursive: {}" , allLR); - - /* (7) numOfLevels */ - Integer numOL = (Integer) inputParam.get(member_numOL); - log.debug("numOfLevels: {}" , numOL); - - /* (8) offset */ - Integer offset = (Integer) inputParam.get(member_offset); - log.debug("offset: {}" , offset); - - /* (9) count */ - Integer count = (Integer) inputParam.get(member_count); - log.debug("count: {}" , count); - - LSInputData inputData; - if (guser != null) { - inputData = new IdentityLSInputData(guser, surlArray, fileStorageType, - fullDL, allLR, numOL, offset, count); - } else { - inputData = new AnonymousLSInputData(surlArray, fileStorageType, fullDL, - allLR, numOL, offset, count); - } - return inputData; - } - - public Hashtable convertFromOutputData(OutputData data) { - - // Creation of new Hashtable to return - Hashtable outputParam = new Hashtable(); - LSOutputData outputData = (LSOutputData) data; - - /* (1) TReturnStatus */ - TReturnStatus globStatus = outputData.getStatus(); - if (globStatus != null) { - globStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); - } - - /* (2) TRequestToken */ - TRequestToken requestToken = outputData.getRequestToken(); - if (requestToken != null) { - outputParam.put("requestToken", requestToken.toString()); - } - - /* (3) ArrayOfTMetaDataPathDetail details */ - ArrayOfTMetaDataPathDetail details = outputData.getDetails(); - if (details != null) { - details.encode(outputParam, ArrayOfTMetaDataPathDetail.PNAME_DETAILS); - } - - // Return global structure. 
- return outputParam; - } + /** + * Logger + */ + private static final Logger log = LoggerFactory.getLogger(LsConverter.class); + + public LsConverter() { + + }; + + /** + * This method return a LSInputData created from input Hashtable structure of an xmlrpc + * spaceReservation v2.1 call. SpaceResData can be used to invoke LS method of Directory Functions + * Manager + */ + public InputData convertToInputData(Map inputParam) { + + log.debug("SrmLs: LSConverter :Call received :Creation of SpaceResData = {}", + inputParam.size()); + log.debug("SrmLs: LSConverter: Input Structure toString: {}", + ParameterDisplayHelper.display(inputParam)); + + // Member name definition for inputParam struct , from SRM V2.2 + String member_fullDL = new String("fullDetailedList"); + String member_allLR = new String("allLevelRecursive"); + String member_numOL = new String("numOfLevels"); + String member_offset = new String("offset"); + String member_count = new String("count"); + + /* Creation of VomsGridUser */ + GridUserInterface guser = GridUserManager.decode(inputParam); + + /* (2) anyURI[] arrayOfSURLs */ + ArrayOfSURLs surlArray = null; + try { + surlArray = ArrayOfSURLs.decode(inputParam, ArrayOfSURLs.ARRAY_OF_SURLS); + } catch (InvalidArrayOfSURLsAttributeException e2) { + log.debug("SrmLs: Empty surlArray found!", e2); + surlArray = null; + } + + TFileStorageType fileStorageType = + TFileStorageType.decode(inputParam, TFileStorageType.PNAME_FILESTORAGETYPE); + log.debug("fileType: {}", fileStorageType); + + /* (5) fullDetailedList */ + Boolean fullDL = (Boolean) inputParam.get(member_fullDL); + log.debug("fullDetailedList: {}", fullDL); + + /* (6) allLevelRecursive */ + Boolean allLR = (Boolean) inputParam.get(member_allLR); + log.debug("allLevelRecursive: {}", allLR); + + /* (7) numOfLevels */ + Integer numOL = (Integer) inputParam.get(member_numOL); + log.debug("numOfLevels: {}", numOL); + + /* (8) offset */ + Integer offset = (Integer) inputParam.get(member_offset); + 
log.debug("offset: {}", offset); + + /* (9) count */ + Integer count = (Integer) inputParam.get(member_count); + log.debug("count: {}", count); + + LSInputData inputData; + if (guser != null) { + inputData = new IdentityLSInputData(guser, surlArray, fileStorageType, fullDL, allLR, numOL, + offset, count); + } else { + inputData = + new AnonymousLSInputData(surlArray, fileStorageType, fullDL, allLR, numOL, offset, count); + } + return inputData; + } + + public Map convertFromOutputData(OutputData data) { + + // Creation of new HashTable to return + Map outputParam = Maps.newHashMap(); + LSOutputData outputData = (LSOutputData) data; + + /* (1) TReturnStatus */ + TReturnStatus globStatus = outputData.getStatus(); + if (globStatus != null) { + globStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); + } + + /* (2) TRequestToken */ + TRequestToken requestToken = outputData.getRequestToken(); + if (requestToken != null) { + outputParam.put("requestToken", requestToken.toString()); + } + + /* (3) ArrayOfTMetaDataPathDetail details */ + ArrayOfTMetaDataPathDetail details = outputData.getDetails(); + if (details != null) { + details.encode(outputParam, ArrayOfTMetaDataPathDetail.PNAME_DETAILS); + } + + // Return global structure. 
+ return outputParam; + } } diff --git a/src/main/java/it/grid/storm/xmlrpc/converter/directory/MkdirConverter.java b/src/main/java/it/grid/storm/xmlrpc/converter/directory/MkdirConverter.java index ff6dd01f4..375159985 100644 --- a/src/main/java/it/grid/storm/xmlrpc/converter/directory/MkdirConverter.java +++ b/src/main/java/it/grid/storm/xmlrpc/converter/directory/MkdirConverter.java @@ -17,6 +17,13 @@ package it.grid.storm.xmlrpc.converter.directory; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; + import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; import it.grid.storm.srm.types.InvalidTSURLAttributesException; @@ -31,15 +38,9 @@ import it.grid.storm.xmlrpc.converter.Converter; import it.grid.storm.xmlrpc.converter.ParameterDisplayHelper; -import java.util.HashMap; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project + * This class is part of the StoRM project. Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project * * @author lucamag * @date May 28, 2008 @@ -48,62 +49,58 @@ public class MkdirConverter implements Converter { - /** - * Logger - */ - private static final Logger log = LoggerFactory - .getLogger(MkdirConverter.class); + /** + * Logger + */ + private static final Logger log = LoggerFactory.getLogger(MkdirConverter.class); - public MkdirConverter() { + public MkdirConverter() { - }; + }; - /** - * This method return a MkdirInputData created from input Hashtable structure - * of an xmlrpc Mkdir v2.1 call. 
Mkdir Input Data can be used to invoke mkdir - * method of DirectoryFunctionsManager - */ - public InputData convertToInputData(Map inputParam) { + /** + * This method return a MkdirInputData created from input Hashtable structure of an xmlrpc Mkdir + * v2.1 call. Mkdir Input Data can be used to invoke mkdir method of DirectoryFunctionsManager + */ + public InputData convertToInputData(Map inputParam) { - log - .debug("SrmMkdir: Converter :Call received :Creation of MkdirInputData = {}" - , inputParam.size()); - log.debug("SrmMkdir: Converter: Input Structure toString: {}" - , ParameterDisplayHelper.display(inputParam)); + log.debug("SrmMkdir: Converter :Call received :Creation of MkdirInputData = {}", + inputParam.size()); + log.debug("SrmMkdir: Converter: Input Structure toString: {}", + ParameterDisplayHelper.display(inputParam)); - GridUserInterface guser = GridUserManager.decode(inputParam); + GridUserInterface guser = GridUserManager.decode(inputParam); - /* (2) directoryPath */ - TSURL surl = null; - try { - surl = TSURL.decode(inputParam, TSURL.PNAME_SURL); - } catch (InvalidTSURLAttributesException e1) { - log.debug("SrmMkdir: ErrorCreating surl: {}" , e1.toString(),e1); - } + /* (2) directoryPath */ + TSURL surl = null; + try { + surl = TSURL.decode(inputParam, TSURL.PNAME_SURL); + } catch (InvalidTSURLAttributesException e1) { + log.debug("SrmMkdir: ErrorCreating surl: {}", e1.toString(), e1); + } - MkdirInputData inputData; - if (guser != null) { - inputData = new IdentityMkdirInputData(guser, surl); - } else { - inputData = new AnonymousMkdirInputData(surl); - } - return inputData; - } + MkdirInputData inputData; + if (guser != null) { + inputData = new IdentityMkdirInputData(guser, surl); + } else { + inputData = new AnonymousMkdirInputData(surl); + } + return inputData; + } - public Map convertFromOutputData(OutputData outputData) { + public Map convertFromOutputData(OutputData outputData) { - log - .debug("SrmMkdir: Converter :Call received 
:Creation of XMLRPC Output Structure! "); + log.debug("SrmMkdir: Converter :Call received :Creation of XMLRPC Output Structure! "); - Map outputParam = new HashMap(); + Map outputParam = Maps.newHashMap(); - MkdirOutputData odata = (MkdirOutputData) outputData; - TReturnStatus outputStatus = odata.getStatus(); + MkdirOutputData odata = (MkdirOutputData) outputData; + TReturnStatus outputStatus = odata.getStatus(); - outputStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); + outputStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); - // Return Output Structure - return outputParam; + // Return Output Structure + return outputParam; - } + } } diff --git a/src/main/java/it/grid/storm/xmlrpc/converter/directory/MvConverter.java b/src/main/java/it/grid/storm/xmlrpc/converter/directory/MvConverter.java index f608573b4..08fdc36ca 100644 --- a/src/main/java/it/grid/storm/xmlrpc/converter/directory/MvConverter.java +++ b/src/main/java/it/grid/storm/xmlrpc/converter/directory/MvConverter.java @@ -17,6 +17,13 @@ package it.grid.storm.xmlrpc.converter.directory; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; + import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; import it.grid.storm.srm.types.InvalidTSURLAttributesException; @@ -31,19 +38,12 @@ import it.grid.storm.xmlrpc.converter.Converter; import it.grid.storm.xmlrpc.converter.ParameterDisplayHelper; -import java.util.HashMap; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project + * This class is part of the StoRM project. Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project * - * This class represents the Type Converter for SrmMv function . 
This class have - * get an input data from xmlrpc call anc convert it into a StoRM Type that can - * be used to invoke the MvExecutor. + * This class represents the Type Converter for SrmMv function . This class have get an input data + * from xmlrpc call anc convert it into a StoRM Type that can be used to invoke the MvExecutor. * * @author lucamag * @date May 28, 2008 @@ -52,65 +52,62 @@ public class MvConverter implements Converter { - /** - * Logger - */ - private static final Logger log = LoggerFactory.getLogger(MvConverter.class); - - public MvConverter() { - - }; - - /** - * This method return a MvInputData created from input Map structure of an - * xmlrpc SrmMv v2.2 call. - */ - public InputData convertToInputData(Map inputParam) { - - log.debug("SrmMv: Converter :Call received :Creation of MvInputData = {}" - , inputParam.size()); - log.debug("SrmMv: Converter: Input Structure toString: {}" - , ParameterDisplayHelper.display(inputParam)); - - GridUserInterface guser = GridUserManager.decode(inputParam); - - /* (2) fromSURL */ - TSURL fromSURL = null; - try { - fromSURL = TSURL.decode(inputParam, TSURL.PNAME_FROMSURL); - } catch (InvalidTSURLAttributesException e1) { - log.debug("SrmMv: ErrorCreating surl: {}" , e1.toString(),e1); - } - - /* (3) toSURL */ - TSURL toSURL = null; - try { - toSURL = TSURL.decode(inputParam, TSURL.PNAME_TOSURL); - } catch (InvalidTSURLAttributesException e1) { - log.debug("SrmMv: ErrorCreating surl: {}" , e1.toString(),e1); - } - - MvInputData inputData; - if (guser != null) { - inputData = new IdentityMvInputData(guser, fromSURL, toSURL); - } else { - inputData = new AnonymousMvInputData(fromSURL, toSURL); - } - return inputData; - - } - - public Map convertFromOutputData(OutputData data) { - - log - .debug("SrmMv: Converter :Call received :Creation of XMLRPC Output Structure! 
"); - // Output structure to return to xmlrpc client - Map outputParam = new HashMap(); - MvOutputData outputData = (MvOutputData) data; - TReturnStatus status = outputData.getStatus(); - status.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); - - // Return Output Structure - return outputParam; - } + /** + * Logger + */ + private static final Logger log = LoggerFactory.getLogger(MvConverter.class); + + public MvConverter() { + + }; + + /** + * This method return a MvInputData created from input Map structure of an xmlrpc SrmMv v2.2 call. + */ + public InputData convertToInputData(Map inputParam) { + + log.debug("SrmMv: Converter :Call received :Creation of MvInputData = {}", inputParam.size()); + log.debug("SrmMv: Converter: Input Structure toString: {}", + ParameterDisplayHelper.display(inputParam)); + + GridUserInterface guser = GridUserManager.decode(inputParam); + + /* (2) fromSURL */ + TSURL fromSURL = null; + try { + fromSURL = TSURL.decode(inputParam, TSURL.PNAME_FROMSURL); + } catch (InvalidTSURLAttributesException e1) { + log.debug("SrmMv: ErrorCreating surl: {}", e1.toString(), e1); + } + + /* (3) toSURL */ + TSURL toSURL = null; + try { + toSURL = TSURL.decode(inputParam, TSURL.PNAME_TOSURL); + } catch (InvalidTSURLAttributesException e1) { + log.debug("SrmMv: ErrorCreating surl: {}", e1.toString(), e1); + } + + MvInputData inputData; + if (guser != null) { + inputData = new IdentityMvInputData(guser, fromSURL, toSURL); + } else { + inputData = new AnonymousMvInputData(fromSURL, toSURL); + } + return inputData; + + } + + public Map convertFromOutputData(OutputData data) { + + log.debug("SrmMv: Converter :Call received :Creation of XMLRPC Output Structure! 
"); + // Output structure to return to xmlrpc client + Map outputParam = Maps.newHashMap(); + MvOutputData outputData = (MvOutputData) data; + TReturnStatus status = outputData.getStatus(); + status.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); + + // Return Output Structure + return outputParam; + } } diff --git a/src/main/java/it/grid/storm/xmlrpc/converter/directory/RmConverter.java b/src/main/java/it/grid/storm/xmlrpc/converter/directory/RmConverter.java index 3569dc033..d1f6c5392 100644 --- a/src/main/java/it/grid/storm/xmlrpc/converter/directory/RmConverter.java +++ b/src/main/java/it/grid/storm/xmlrpc/converter/directory/RmConverter.java @@ -17,6 +17,13 @@ package it.grid.storm.xmlrpc.converter.directory; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; + import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; import it.grid.storm.srm.types.ArrayOfSURLs; @@ -32,18 +39,11 @@ import it.grid.storm.xmlrpc.converter.Converter; import it.grid.storm.xmlrpc.converter.ParameterDisplayHelper; -import java.util.HashMap; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * This class is part of the StoRM project. * - * This class represents the Type Converter for Rm function . This class have - * get an input data from xmlrpc call anc convert it into a StoRM Type that can - * be used to invoke the RmManager + * This class represents the Type Converter for Rm function . 
This class have get an input data from + * xmlrpc call anc convert it into a StoRM Type that can be used to invoke the RmManager * * Copyright: Copyright (c) 2008 Company: INFN-CNAF and ICTP/EGRID project * @@ -54,66 +54,62 @@ public class RmConverter implements Converter { - /** - * Logger - */ - private static final Logger log = LoggerFactory.getLogger(RmConverter.class); - - public RmConverter() { - - }; - - /** - * This method return a RmInputData created from input Hashtable structure of - * an xmlrpc Rm v2.1 call. Rm Input Data can be used to invoke mkdir method of - * DirectoryFunctionsManager - */ - public InputData convertToInputData(Map inputParam) { - - log.debug("RmConverter :Call received :Creation of RmdirInputData = {}" - , inputParam.size()); - log.debug("RmConverter: Input Structure toString: {}" - , ParameterDisplayHelper.display(inputParam)); - - GridUserInterface guser = GridUserManager.decode(inputParam); - - ArrayOfSURLs surlArray = null; - try { - surlArray = ArrayOfSURLs.decode(inputParam, ArrayOfSURLs.ARRAY_OF_SURLS); - } catch (InvalidArrayOfSURLsAttributeException e1) { - // TODO Auto-generated catch block - e1.printStackTrace(); - } - - RmInputData inputData; - if (guser != null) { - inputData = new IdentityRmInputData(guser, surlArray); - } else { - inputData = new AnonymousRmInputData(surlArray); - } - log.debug("RmInputData Created!"); - return inputData; - } - - public Map convertFromOutputData(OutputData outputData) { - - log - .debug("RmConverter :Call received :Creation of XMLRPC Output Structure! 
"); - // Output structure to return to xmlrpc client - Map outputParam = new HashMap(); - RmOutputData rmOutputData = (RmOutputData) outputData; - TReturnStatus status = rmOutputData.getStatus(); - if (status != null) { - status.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); - } - - ArrayOfTSURLReturnStatus surlArray = rmOutputData.getSurlStatus(); - if (surlArray != null) { - surlArray.encode(outputParam, - ArrayOfTSURLReturnStatus.PNAME_ARRAYOFFILESTATUSES); - } - - // Return global structure. - return outputParam; - } + /** + * Logger + */ + private static final Logger log = LoggerFactory.getLogger(RmConverter.class); + + public RmConverter() { + + }; + + /** + * This method return a RmInputData created from input Hashtable structure of an xmlrpc Rm v2.1 + * call. Rm Input Data can be used to invoke mkdir method of DirectoryFunctionsManager + */ + public InputData convertToInputData(Map inputParam) { + + log.debug("RmConverter :Call received :Creation of RmdirInputData = {}", inputParam.size()); + log.debug("RmConverter: Input Structure toString: {}", + ParameterDisplayHelper.display(inputParam)); + + GridUserInterface guser = GridUserManager.decode(inputParam); + + ArrayOfSURLs surlArray = null; + try { + surlArray = ArrayOfSURLs.decode(inputParam, ArrayOfSURLs.ARRAY_OF_SURLS); + } catch (InvalidArrayOfSURLsAttributeException e1) { + // TODO Auto-generated catch block + e1.printStackTrace(); + } + + RmInputData inputData; + if (guser != null) { + inputData = new IdentityRmInputData(guser, surlArray); + } else { + inputData = new AnonymousRmInputData(surlArray); + } + log.debug("RmInputData Created!"); + return inputData; + } + + public Map convertFromOutputData(OutputData outputData) { + + log.debug("RmConverter :Call received :Creation of XMLRPC Output Structure! 
"); + // Output structure to return to xmlrpc client + Map outputParam = Maps.newHashMap(); + RmOutputData rmOutputData = (RmOutputData) outputData; + TReturnStatus status = rmOutputData.getStatus(); + if (status != null) { + status.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); + } + + ArrayOfTSURLReturnStatus surlArray = rmOutputData.getSurlStatus(); + if (surlArray != null) { + surlArray.encode(outputParam, ArrayOfTSURLReturnStatus.PNAME_ARRAYOFFILESTATUSES); + } + + // Return global structure. + return outputParam; + } } diff --git a/src/main/java/it/grid/storm/xmlrpc/converter/discovery/PingConverter.java b/src/main/java/it/grid/storm/xmlrpc/converter/discovery/PingConverter.java index c4ba207cd..9594fe8c5 100644 --- a/src/main/java/it/grid/storm/xmlrpc/converter/discovery/PingConverter.java +++ b/src/main/java/it/grid/storm/xmlrpc/converter/discovery/PingConverter.java @@ -27,6 +27,13 @@ package it.grid.storm.xmlrpc.converter.discovery; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; + import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; import it.grid.storm.srm.types.ArrayOfTExtraInfo; @@ -38,56 +45,48 @@ import it.grid.storm.synchcall.data.discovery.PingOutputData; import it.grid.storm.xmlrpc.converter.Converter; -import java.util.Hashtable; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public class PingConverter implements Converter { - private static final Logger log = LoggerFactory - .getLogger(PingConverter.class); + private static final Logger log = LoggerFactory.getLogger(PingConverter.class); - public PingConverter() { + public PingConverter() { - } + } - public InputData convertToInputData(Map inputParam) { + public InputData convertToInputData(Map inputParam) { - log.debug("Ping: input converter started. InputParam "); + log.debug("Ping: input converter started. 
InputParam "); - GridUserInterface requestor = GridUserManager.decode(inputParam); + GridUserInterface requestor = GridUserManager.decode(inputParam); - String authorizationID = (String) inputParam.get("authorizationID"); + String authorizationID = (String) inputParam.get("authorizationID"); - PingInputData inputData; - if (requestor != null) { - inputData = new IdentityPingInputData(requestor, authorizationID); - } else { - inputData = new AnonymousPingInputData(authorizationID); - } - log.debug("Ping: input converter has finished."); - return inputData; - } + PingInputData inputData; + if (requestor != null) { + inputData = new IdentityPingInputData(requestor, authorizationID); + } else { + inputData = new AnonymousPingInputData(authorizationID); + } + log.debug("Ping: input converter has finished."); + return inputData; + } - public Map convertFromOutputData(OutputData data) { + public Map convertFromOutputData(OutputData data) { - log.debug("Ping: output converter started."); - Hashtable outputParam = new Hashtable(); - PingOutputData outputData = (PingOutputData) data; - String versionInfo = outputData.getVersionInfo(); - if (versionInfo != null) { - outputParam.put("versionInfo", versionInfo); - } + log.debug("Ping: output converter started."); + Map outputParam = Maps.newHashMap(); + PingOutputData outputData = (PingOutputData) data; + String versionInfo = outputData.getVersionInfo(); + if (versionInfo != null) { + outputParam.put("versionInfo", versionInfo); + } - ArrayOfTExtraInfo extraInfoArray = outputData.getExtraInfoArray(); - if (extraInfoArray != null) { - extraInfoArray.encode(outputParam, - ArrayOfTExtraInfo.PNAME_STORAGESYSTEMINFO); - } + ArrayOfTExtraInfo extraInfoArray = outputData.getExtraInfoArray(); + if (extraInfoArray != null) { + extraInfoArray.encode(outputParam, ArrayOfTExtraInfo.PNAME_STORAGESYSTEMINFO); + } - log.debug("Ping: output converter has finished."); - return outputParam; - } + log.debug("Ping: output converter has 
finished."); + return outputParam; + } } diff --git a/src/main/java/it/grid/storm/xmlrpc/converter/space/GetSpaceMetaDataConverter.java b/src/main/java/it/grid/storm/xmlrpc/converter/space/GetSpaceMetaDataConverter.java index f2a0a0e04..01deaadf4 100644 --- a/src/main/java/it/grid/storm/xmlrpc/converter/space/GetSpaceMetaDataConverter.java +++ b/src/main/java/it/grid/storm/xmlrpc/converter/space/GetSpaceMetaDataConverter.java @@ -17,6 +17,13 @@ package it.grid.storm.xmlrpc.converter.space; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; + import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; import it.grid.storm.srm.types.ArrayOfTMetaDataSpace; @@ -27,23 +34,17 @@ import it.grid.storm.synchcall.data.OutputData; import it.grid.storm.synchcall.data.space.AnonymousGetSpaceMetaDataInputData; import it.grid.storm.synchcall.data.space.GetSpaceMetaDataInputData; -import it.grid.storm.synchcall.data.space.IdentityGetSpaceMetaDataInputData; import it.grid.storm.synchcall.data.space.GetSpaceMetaDataOutputData; +import it.grid.storm.synchcall.data.space.IdentityGetSpaceMetaDataInputData; import it.grid.storm.xmlrpc.converter.Converter; -import java.util.Hashtable; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project + * This class is part of the StoRM project. Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project * - * This class represents the Type Converter for GetSpaceMetaData function . This - * class have get an input data from xmlrpc call anc convert it into a StoRM - * Type that can be used to invoke the GetSpaceMetaDataManager + * This class represents the Type Converter for GetSpaceMetaData function . 
This class have get an + * input data from xmlrpc call anc convert it into a StoRM Type that can be used to invoke the + * GetSpaceMetaDataManager * * @author lucamag * @date May 29, 2008 @@ -52,77 +53,66 @@ public class GetSpaceMetaDataConverter implements Converter { - /** - * Logger - */ - private static final Logger log = LoggerFactory - .getLogger(GetSpaceMetaDataConverter.class); - - public GetSpaceMetaDataConverter() { - - }; - - /** - * This method return a SpaceResData created from input Hashtable structure of - * an xmlrpc spaceReservation v2.2 call. SpaceResData can be used to invoke - * SpaceResevation Manager - */ - public InputData convertToInputData(Map inputParam) { - - String memberName = null; - - /* Creation of VomsGridUser */ - GridUserInterface guser = GridUserManager.decode(inputParam); - - /* (1) authorizationID (never used) */ - String authID = (String) inputParam.get("authorizationID"); - - ArrayOfTSpaceToken arrayOfSpaceTokens; - try { - arrayOfSpaceTokens = ArrayOfTSpaceToken.decode(inputParam, - ArrayOfTSpaceToken.PNAME_ARRAYOFSPACETOKENS); - } catch (InvalidArrayOfTSpaceTokenAttributeException e) { - arrayOfSpaceTokens = null; - } - - GetSpaceMetaDataInputData inputData; - if (guser != null) { - inputData = new IdentityGetSpaceMetaDataInputData(guser, - arrayOfSpaceTokens); - } else { - inputData = new AnonymousGetSpaceMetaDataInputData(arrayOfSpaceTokens); - } - return inputData; - } - - public Map convertFromOutputData(OutputData data) { - - log - .debug("GetSpaceMetaDataConverter: Creation of XMLRPC Output Structure! 
"); - - // Creation of new Hashtable to return - Hashtable outputParam = new Hashtable(); - - // outputData - GetSpaceMetaDataOutputData outputData = (GetSpaceMetaDataOutputData) data; - - /* (1) returnStatus */ - TReturnStatus returnStatus = outputData.getStatus(); - if (returnStatus != null) { - returnStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); - } - - /* (2) arrayOfSpaceDetails */ - ArrayOfTMetaDataSpace arrayOfSpaceDetails = outputData - .getMetaDataSpaceArray(); - if (arrayOfSpaceDetails != null) { - arrayOfSpaceDetails.encode(outputParam, - ArrayOfTMetaDataSpace.PNAME_ARRAYOFSPACEDETAILS); - } - - log.debug(outputParam.toString()); - - // Return output Parameter structure - return outputParam; - } + /** + * Logger + */ + private static final Logger log = LoggerFactory.getLogger(GetSpaceMetaDataConverter.class); + + public GetSpaceMetaDataConverter() { + + }; + + /** + * This method return a SpaceResData created from input Hashtable structure of an xmlrpc + * spaceReservation v2.2 call. SpaceResData can be used to invoke SpaceResevation Manager + */ + public InputData convertToInputData(Map inputParam) { + + /* Creation of VomsGridUser */ + GridUserInterface guser = GridUserManager.decode(inputParam); + + ArrayOfTSpaceToken arrayOfSpaceTokens; + try { + arrayOfSpaceTokens = + ArrayOfTSpaceToken.decode(inputParam, ArrayOfTSpaceToken.PNAME_ARRAYOFSPACETOKENS); + } catch (InvalidArrayOfTSpaceTokenAttributeException e) { + arrayOfSpaceTokens = null; + } + + GetSpaceMetaDataInputData inputData; + if (guser != null) { + inputData = new IdentityGetSpaceMetaDataInputData(guser, arrayOfSpaceTokens); + } else { + inputData = new AnonymousGetSpaceMetaDataInputData(arrayOfSpaceTokens); + } + return inputData; + } + + public Map convertFromOutputData(OutputData data) { + + log.debug("GetSpaceMetaDataConverter: Creation of XMLRPC Output Structure! 
"); + + // Creation of new Hashtable to return + Map outputParam = Maps.newHashMap(); + + // outputData + GetSpaceMetaDataOutputData outputData = (GetSpaceMetaDataOutputData) data; + + /* (1) returnStatus */ + TReturnStatus returnStatus = outputData.getStatus(); + if (returnStatus != null) { + returnStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); + } + + /* (2) arrayOfSpaceDetails */ + ArrayOfTMetaDataSpace arrayOfSpaceDetails = outputData.getMetaDataSpaceArray(); + if (arrayOfSpaceDetails != null) { + arrayOfSpaceDetails.encode(outputParam, ArrayOfTMetaDataSpace.PNAME_ARRAYOFSPACEDETAILS); + } + + log.debug(outputParam.toString()); + + // Return output Parameter structure + return outputParam; + } } diff --git a/src/main/java/it/grid/storm/xmlrpc/converter/space/ReserveSpaceConverter.java b/src/main/java/it/grid/storm/xmlrpc/converter/space/ReserveSpaceConverter.java index fb5cfdbe9..f7408a87f 100644 --- a/src/main/java/it/grid/storm/xmlrpc/converter/space/ReserveSpaceConverter.java +++ b/src/main/java/it/grid/storm/xmlrpc/converter/space/ReserveSpaceConverter.java @@ -28,6 +28,13 @@ package it.grid.storm.xmlrpc.converter.space; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; + import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; import it.grid.storm.srm.types.ArrayOfTExtraInfo; @@ -46,156 +53,135 @@ import it.grid.storm.xmlrpc.converter.Converter; import it.grid.storm.xmlrpc.converter.ParameterDisplayHelper; -import java.util.HashMap; -import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public class ReserveSpaceConverter implements Converter { - /** - * Logger - */ - private static final Logger log = LoggerFactory - .getLogger(ReserveSpaceConverter.class); - - public ReserveSpaceConverter() { - - }; - - /** - * This method return a SpaceResData created from input Hashtable structure of - * an 
xmlrpc spaceReservation v2.1 call. SpaceResData can be used to invoke - * SpaceResevation Manager - */ - public InputData convertToInputData(Map inputParam) { - - log - .debug("reserveSpaceConverter :Call received :Creation of SpaceResData = {}" - , inputParam.size()); - log.debug("reserveSpaceConverter: Input Structure toString: {}" - , ParameterDisplayHelper.display(inputParam)); - - String memberName = null; - - GridUserInterface guser = GridUserManager.decode(inputParam); - - memberName = new String("authorizationID"); - String authID = (String) inputParam.get(memberName); - - memberName = new String("userSpaceTokenDescription"); - String spaceAlias = (String) inputParam.get(memberName); - if (spaceAlias == null) { - spaceAlias = new String(""); - } - - TRetentionPolicyInfo retentionPolicyInfo = TRetentionPolicyInfo.decode( - inputParam, TRetentionPolicyInfo.PNAME_retentionPolicyInfo); - - TSizeInBytes desiredSizeOfTotalSpace = TSizeInBytes.decode(inputParam, - TSizeInBytes.PNAME_DESIREDSIZEOFTOTALSPACE); - - TSizeInBytes desiredSizeOfGuaranteedSpace = TSizeInBytes.decode(inputParam, - TSizeInBytes.PNAME_DESIREDSIZEOFGUARANTEEDSPACE); - - ArrayOfTExtraInfo storageSystemInfo; - try { - storageSystemInfo = ArrayOfTExtraInfo.decode(inputParam, - ArrayOfTExtraInfo.PNAME_STORAGESYSTEMINFO); - } catch (InvalidArrayOfTExtraInfoAttributeException e) { - storageSystemInfo = null; - } - - ReserveSpaceInputData inputData; - if (guser != null) { - inputData = new IdentityReserveSpaceInputData(guser, spaceAlias, - retentionPolicyInfo, desiredSizeOfTotalSpace, - desiredSizeOfGuaranteedSpace, storageSystemInfo); - } else { - inputData = new AnonymousReserveSpaceInputData(spaceAlias, - retentionPolicyInfo, desiredSizeOfTotalSpace, - desiredSizeOfGuaranteedSpace, storageSystemInfo); - } - TLifeTimeInSeconds desiredLifetimeOfReservedSpace = TLifeTimeInSeconds - .decode(inputParam, - TLifeTimeInSeconds.PNAME_DESIREDLIFETIMEOFRESERVEDSPACE); - if (desiredLifetimeOfReservedSpace != 
null - && !desiredLifetimeOfReservedSpace.isEmpty()) { - inputData.setSpaceLifetime(desiredLifetimeOfReservedSpace); - } - return inputData; - - } - - public Map convertFromOutputData(OutputData data) { - - log - .debug("reserveSpaceConverter :Call received :Creation of XMLRPC Output Structure! "); - - // Creation of new Hashtable to return - Map outputParam = new HashMap(); - - ReserveSpaceOutputData outputData = (ReserveSpaceOutputData) data; - - /* (1) returnStatus */ - TReturnStatus returnStatus = outputData.getStatus(); - returnStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); - - /* (2) requestToken */ - /* - * Actually we are not planning an asynchronous version of ReserveSpace (in - * theory not needed for StoRM). Therefor this parameter is not set. - */ - - /* (3) estimatedProcessingTime */ - // TODO: in the future (actually the FE is predisposed to decode this value - // as an int). - - /* (4) retentionPolocyInfo */ - TRetentionPolicyInfo retentionPolicyInfo = outputData - .getRetentionPolicyInfo(); - if (retentionPolicyInfo != null) { - retentionPolicyInfo.encode(outputParam, - TRetentionPolicyInfo.PNAME_retentionPolicyInfo); - } - - /* (5) sizeOfTotalReservedSpace */ - TSizeInBytes sizeOfTotalReservedSpace = outputData.getTotalSize(); - if (sizeOfTotalReservedSpace != null) { - if (!(sizeOfTotalReservedSpace.isEmpty())) { - sizeOfTotalReservedSpace.encode(outputParam, - TSizeInBytes.PNAME_SIZEOFTOTALRESERVEDSPACE); - } - } - - /* (6) sizeOfGuaranteedReservedSpace */ - TSizeInBytes sizeOfGuaranteedReservedSpace = outputData.getGuaranteedSize(); - if (sizeOfGuaranteedReservedSpace != null) { - if (!(sizeOfGuaranteedReservedSpace.isEmpty())) { - sizeOfGuaranteedReservedSpace.encode(outputParam, - TSizeInBytes.PNAME_SIZEOFGUARANTEEDRESERVEDSPACE); - } - } - - /* (7) lifetimeOfReservedSpace */ - TLifeTimeInSeconds lifetimeOfReservedSpace = outputData - .getLifeTimeInSeconds(); - if (lifetimeOfReservedSpace != null) { - if 
(!(lifetimeOfReservedSpace.isEmpty())) { - lifetimeOfReservedSpace.encode(outputParam, - TLifeTimeInSeconds.PNAME_LIFETIMEOFRESERVEDSPACE); - } - } - - /* (8) spaceToken */ - TSpaceToken spaceToken = outputData.getSpaceToken(); - if (spaceToken != null) { - spaceToken.encode(outputParam, TSpaceToken.PNAME_SPACETOKEN); - } - - log.debug(outputParam.toString()); - - return outputParam; - } + /** + * Logger + */ + private static final Logger log = LoggerFactory.getLogger(ReserveSpaceConverter.class); + + public ReserveSpaceConverter() { + + }; + + /** + * This method return a SpaceResData created from input Hashtable structure of an xmlrpc + * spaceReservation v2.1 call. SpaceResData can be used to invoke SpaceResevation Manager + */ + public InputData convertToInputData(Map inputParam) { + + log.debug("reserveSpaceConverter :Call received :Creation of SpaceResData = {}", + inputParam.size()); + log.debug("reserveSpaceConverter: Input Structure toString: {}", + ParameterDisplayHelper.display(inputParam)); + + String memberName = null; + + GridUserInterface guser = GridUserManager.decode(inputParam); + + memberName = new String("userSpaceTokenDescription"); + String spaceAlias = (String) inputParam.get(memberName); + if (spaceAlias == null) { + spaceAlias = new String(""); + } + + TRetentionPolicyInfo retentionPolicyInfo = + TRetentionPolicyInfo.decode(inputParam, TRetentionPolicyInfo.PNAME_retentionPolicyInfo); + + TSizeInBytes desiredSizeOfTotalSpace = + TSizeInBytes.decode(inputParam, TSizeInBytes.PNAME_DESIREDSIZEOFTOTALSPACE); + + TSizeInBytes desiredSizeOfGuaranteedSpace = + TSizeInBytes.decode(inputParam, TSizeInBytes.PNAME_DESIREDSIZEOFGUARANTEEDSPACE); + + ArrayOfTExtraInfo storageSystemInfo; + try { + storageSystemInfo = + ArrayOfTExtraInfo.decode(inputParam, ArrayOfTExtraInfo.PNAME_STORAGESYSTEMINFO); + } catch (InvalidArrayOfTExtraInfoAttributeException e) { + storageSystemInfo = null; + } + + ReserveSpaceInputData inputData; + if (guser != null) { + 
inputData = new IdentityReserveSpaceInputData(guser, spaceAlias, retentionPolicyInfo, + desiredSizeOfTotalSpace, desiredSizeOfGuaranteedSpace, storageSystemInfo); + } else { + inputData = new AnonymousReserveSpaceInputData(spaceAlias, retentionPolicyInfo, + desiredSizeOfTotalSpace, desiredSizeOfGuaranteedSpace, storageSystemInfo); + } + TLifeTimeInSeconds desiredLifetimeOfReservedSpace = TLifeTimeInSeconds.decode(inputParam, + TLifeTimeInSeconds.PNAME_DESIREDLIFETIMEOFRESERVEDSPACE); + if (desiredLifetimeOfReservedSpace != null && !desiredLifetimeOfReservedSpace.isEmpty()) { + inputData.setSpaceLifetime(desiredLifetimeOfReservedSpace); + } + return inputData; + + } + + public Map convertFromOutputData(OutputData data) { + + log.debug("reserveSpaceConverter :Call received :Creation of XMLRPC Output Structure! "); + + // Creation of new HashMap to return + Map outputParam = Maps.newHashMap(); + + ReserveSpaceOutputData outputData = (ReserveSpaceOutputData) data; + + /* (1) returnStatus */ + TReturnStatus returnStatus = outputData.getStatus(); + returnStatus.encode(outputParam, TReturnStatus.PNAME_RETURNSTATUS); + + /* (2) requestToken */ + /* + * Actually we are not planning an asynchronous version of ReserveSpace (in theory not needed + * for StoRM). Therefor this parameter is not set. + */ + + /* (3) estimatedProcessingTime */ + // TODO: in the future (actually the FE is predisposed to decode this value + // as an int). 
+ + /* (4) retentionPolocyInfo */ + TRetentionPolicyInfo retentionPolicyInfo = outputData.getRetentionPolicyInfo(); + if (retentionPolicyInfo != null) { + retentionPolicyInfo.encode(outputParam, TRetentionPolicyInfo.PNAME_retentionPolicyInfo); + } + + /* (5) sizeOfTotalReservedSpace */ + TSizeInBytes sizeOfTotalReservedSpace = outputData.getTotalSize(); + if (sizeOfTotalReservedSpace != null) { + if (!(sizeOfTotalReservedSpace.isEmpty())) { + sizeOfTotalReservedSpace.encode(outputParam, TSizeInBytes.PNAME_SIZEOFTOTALRESERVEDSPACE); + } + } + + /* (6) sizeOfGuaranteedReservedSpace */ + TSizeInBytes sizeOfGuaranteedReservedSpace = outputData.getGuaranteedSize(); + if (sizeOfGuaranteedReservedSpace != null) { + if (!(sizeOfGuaranteedReservedSpace.isEmpty())) { + sizeOfGuaranteedReservedSpace.encode(outputParam, + TSizeInBytes.PNAME_SIZEOFGUARANTEEDRESERVEDSPACE); + } + } + + /* (7) lifetimeOfReservedSpace */ + TLifeTimeInSeconds lifetimeOfReservedSpace = outputData.getLifeTimeInSeconds(); + if (lifetimeOfReservedSpace != null) { + if (!(lifetimeOfReservedSpace.isEmpty())) { + lifetimeOfReservedSpace.encode(outputParam, + TLifeTimeInSeconds.PNAME_LIFETIMEOFRESERVEDSPACE); + } + } + + /* (8) spaceToken */ + TSpaceToken spaceToken = outputData.getSpaceToken(); + if (spaceToken != null) { + spaceToken.encode(outputParam, TSpaceToken.PNAME_SPACETOKEN); + } + + log.debug(outputParam.toString()); + + return outputParam; + } } diff --git a/src/test/java/it/grid/storm/config/ConfigurationConverterTest.java b/src/test/java/it/grid/storm/config/ConfigurationConverterTest.java new file mode 100644 index 000000000..bd23e03e8 --- /dev/null +++ b/src/test/java/it/grid/storm/config/ConfigurationConverterTest.java @@ -0,0 +1,133 @@ +package it.grid.storm.config; + +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_MAX_WAIT_MILLIS; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_MIN_IDLE; +import static 
it.grid.storm.config.ConfigurationDefaults.DB_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_TEST_ON_BORROW; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_TEST_WHILE_IDLE; +import static it.grid.storm.config.ConfigurationDefaults.DB_PORT; +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.io.IOException; + +import org.junit.Test; + +import com.fasterxml.jackson.dataformat.javaprop.JavaPropsMapper; + +import it.grid.storm.config.converter.StormPropertiesConversionException; +import it.grid.storm.config.converter.StormPropertiesConverter; +import it.grid.storm.config.model.v2.StormProperties; + +public class ConfigurationConverterTest { + + @Test + public void testLoadedConfigurationFromOldProperties() + throws IOException, StormPropertiesConversionException { + + ClassLoader classLoader = getClass().getClassLoader(); + File source = new File(classLoader.getResource("v1.properties").getFile()); + File target = new File("/tmp/converted.properties"); + + // convert source configuration file and save it into target file: + StormPropertiesConverter.convert(source, target); + + // load new configuration from file + JavaPropsMapper mapper = new JavaPropsMapper(); + StormProperties properties = mapper.readValue(target, StormProperties.class); + + // not converted + assertEquals(DB_PORT, properties.getDb().getPort()); + assertEquals(DB_POOL_SIZE, properties.getDb().getPool().getSize()); + assertEquals(DB_POOL_MIN_IDLE, properties.getDb().getPool().getMinIdle()); + assertEquals(DB_POOL_MAX_WAIT_MILLIS, properties.getDb().getPool().getMaxWaitMillis()); + assertEquals(DB_POOL_TEST_ON_BORROW, properties.getDb().getPool().isTestOnBorrow()); + assertEquals(DB_POOL_TEST_WHILE_IDLE, properties.getDb().getPool().isTestWhileIdle()); + + // SRM service + assertEquals("fe.example.org", properties.getSrmEndpoints().get(0).getHost()); + assertEquals(8444, properties.getSrmEndpoints().get(0).getPort()); + 
assertEquals("fe-01.example.org", properties.getSrmEndpoints().get(1).getHost()); + assertEquals(8444, properties.getSrmEndpoints().get(1).getPort()); + assertEquals("fe-02.example.org", properties.getSrmEndpoints().get(2).getHost()); + assertEquals(8444, properties.getSrmEndpoints().get(2).getPort()); + assertEquals("be.example.org", properties.getDb().getHostname()); + assertEquals("storm", properties.getDb().getUsername()); + assertEquals("my-secret-password", properties.getDb().getPassword()); + assertEquals("prop=1", properties.getDb().getProperties()); + assertEquals(9999, properties.getRest().getPort()); + assertEquals(512, properties.getRest().getMaxThreads()); + assertEquals(2000, properties.getRest().getMaxQueueSize()); + assertEquals(8081, properties.getXmlrpc().getPort()); + assertEquals(512, properties.getXmlrpc().getMaxThreads()); + assertEquals(2000, properties.getXmlrpc().getMaxQueueSize()); + assertEquals(true, properties.getSecurity().isEnabled()); + assertEquals("ilovejava", properties.getSecurity().getToken()); + assertEquals(true, properties.getDu().isEnabled()); + assertEquals(true, properties.getDu().isParallelTasksEnabled()); + assertEquals(60, properties.getDu().getInitialDelay()); + assertEquals(360, properties.getDu().getTasksInterval()); + assertEquals(true, properties.isSanityChecksEnabled()); + assertEquals(true, properties.getDirectories().isEnableAutomaticCreation()); + assertEquals(true, properties.getDirectories().isEnableWritepermOnCreation()); + assertEquals(310000, properties.getPinlifetime().getDefaultValue()); + assertEquals(1900000, properties.getPinlifetime().getMaximum()); + assertEquals("/file", properties.getExtraslashes().getFile()); + assertEquals("/rfio", properties.getExtraslashes().getRfio()); + assertEquals("/root", properties.getExtraslashes().getRoot()); + assertEquals("/gsiftp", properties.getExtraslashes().getGsiftp()); + assertEquals(2000000, properties.getFiles().getDefaultSize()); + assertEquals(300000, 
properties.getFiles().getDefaultLifetime()); + assertEquals("N", properties.getFiles().getDefaultOverwrite()); + assertEquals("P", properties.getFiles().getDefaultStoragetype()); + assertEquals(20, properties.getRequestsScheduler().getCorePoolSize()); + assertEquals(60, properties.getRequestsScheduler().getMaxPoolSize()); + assertEquals(3000, properties.getRequestsScheduler().getQueueSize()); + assertEquals(60, properties.getPtpScheduler().getCorePoolSize()); + assertEquals(300, properties.getPtpScheduler().getMaxPoolSize()); + assertEquals(2000, properties.getPtpScheduler().getQueueSize()); + assertEquals(70, properties.getPtgScheduler().getCorePoolSize()); + assertEquals(400, properties.getPtgScheduler().getMaxPoolSize()); + assertEquals(3000, properties.getPtgScheduler().getQueueSize()); + assertEquals(40, properties.getBolScheduler().getCorePoolSize()); + assertEquals(100, properties.getBolScheduler().getMaxPoolSize()); + assertEquals(1000, properties.getBolScheduler().getQueueSize()); + assertEquals(15, properties.getRequestsPickerAgent().getDelay()); + assertEquals(25, properties.getRequestsPickerAgent().getInterval()); + assertEquals(150, properties.getRequestsPickerAgent().getMaxFetchedSize()); + assertEquals(true, properties.getSynchLs().isDefaultAllLevelRecursive()); + assertEquals(3, properties.getSynchLs().getDefaultNumLevels()); + assertEquals(2, properties.getSynchLs().getDefaultOffset()); + assertEquals(3000, properties.getSynchLs().getMaxEntries()); + assertEquals(false, properties.isSkipPtgAclSetup()); + + assertEquals(60, properties.getInprogressRequestsAgent().getDelay()); + assertEquals(600, properties.getInprogressRequestsAgent().getInterval()); + assertEquals(7000, properties.getInprogressRequestsAgent().getPtpExpirationTime()); + + assertEquals(10, properties.getExpiredSpacesAgent().getDelay()); + assertEquals(300, properties.getExpiredSpacesAgent().getInterval()); + + assertEquals(false, properties.getCompletedRequestsAgent().isEnabled()); + 
assertEquals(100, properties.getCompletedRequestsAgent().getDelay()); + assertEquals(600, properties.getCompletedRequestsAgent().getInterval()); + assertEquals(1000, properties.getCompletedRequestsAgent().getPurgeSize()); + assertEquals(7200, properties.getCompletedRequestsAgent().getPurgeAge()); + + assertEquals(true, properties.getHearthbeat().isBookkeepingEnabled()); + assertEquals(true, properties.getHearthbeat().isPerformanceMeasuringEnabled()); + assertEquals(30, properties.getHearthbeat().getPeriod()); + assertEquals(10, properties.getHearthbeat().getPerformanceLogbookTimeInterval()); + assertEquals(10, properties.getHearthbeat().getPerformanceGlanceTimeInterval()); + + assertEquals(900, properties.getInfoQuotaRefreshPeriod()); + assertEquals("/", properties.getHttpTurlPrefix()); + assertEquals(20000, properties.getServerPoolStatusCheckTimeout()); + assertEquals(10, properties.getAbortMaxloop()); + assertEquals("ping-values.properties", properties.getPingPropertiesFilename()); + + // delete temporary file + target.delete(); + + } +} diff --git a/src/test/java/it/grid/storm/config/ConfigurationTest.java b/src/test/java/it/grid/storm/config/ConfigurationTest.java new file mode 100644 index 000000000..a27217b69 --- /dev/null +++ b/src/test/java/it/grid/storm/config/ConfigurationTest.java @@ -0,0 +1,402 @@ +package it.grid.storm.config; + +import static it.grid.storm.config.ConfigurationDefaults.AUTOMATIC_DIRECTORY_CREATION; +import static it.grid.storm.config.ConfigurationDefaults.BOL_SCHEDULER_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.BOL_SCHEDULER_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.BOL_SCHEDULER_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.BOOK_KEEPING_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_ENABLED; +import static 
it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_PURGE_AGE; +import static it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_PURGE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.DB_PASSWORD; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_MAX_WAIT_MILLIS; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_MIN_IDLE; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_TEST_ON_BORROW; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_TEST_WHILE_IDLE; +import static it.grid.storm.config.ConfigurationDefaults.DB_PORT; +import static it.grid.storm.config.ConfigurationDefaults.DB_PROPERTIES; +import static it.grid.storm.config.ConfigurationDefaults.DB_USERNAME; +import static it.grid.storm.config.ConfigurationDefaults.DEFAULT_FILE_STORAGE_TYPE; +import static it.grid.storm.config.ConfigurationDefaults.DEFAULT_OVERWRITE_MODE; +import static it.grid.storm.config.ConfigurationDefaults.DISKUSAGE_SERVICE_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.DISKUSAGE_SERVICE_INITIAL_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.DISKUSAGE_SERVICE_PARALLEL_TASKS_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.DISKUSAGE_SERVICE_TASKS_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.ENABLE_WRITE_PERM_ON_DIRECTORY; +import static it.grid.storm.config.ConfigurationDefaults.EXPIRED_SPACES_AGENT_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.EXPIRED_SPACES_AGENT_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_FILE_TURL; +import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_GSIFTP_TURL; +import static 
it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_RFIO_TURL; +import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_ROOT_TURL; +import static it.grid.storm.config.ConfigurationDefaults.FILE_DEFAULT_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.FILE_LIFETIME_DEFAULT; +import static it.grid.storm.config.ConfigurationDefaults.GPFS_QUOTA_REFRESH_PERIOD; +import static it.grid.storm.config.ConfigurationDefaults.HEARTHBEAT_PERIOD; +import static it.grid.storm.config.ConfigurationDefaults.HTTP_TURL_PREFIX; +import static it.grid.storm.config.ConfigurationDefaults.INPROGRESS_REQUESTS_AGENT_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.INPROGRESS_REQUESTS_AGENT_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.INPROGRESS_REQUESTS_AGENT_PTP_EXPIRATION_TIME; +import static it.grid.storm.config.ConfigurationDefaults.LS_DEFAULT_ALL_LEVEL_RECURSIVE; +import static it.grid.storm.config.ConfigurationDefaults.LS_DEFAULT_NUM_OF_LEVELS; +import static it.grid.storm.config.ConfigurationDefaults.LS_DEFAULT_OFFSET; +import static it.grid.storm.config.ConfigurationDefaults.LS_MAX_NUMBER_OF_ENTRY; +import static it.grid.storm.config.ConfigurationDefaults.MAX_LOOP; +import static it.grid.storm.config.ConfigurationDefaults.PERFORMANCE_GLANCE_TIME_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.PERFORMANCE_LOGBOOK_TIME_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.PERFORMANCE_MEASURING; +import static it.grid.storm.config.ConfigurationDefaults.PING_VALUES_PROPERTIES_FILENAME; +import static it.grid.storm.config.ConfigurationDefaults.PIN_LIFETIME_DEFAULT; +import static it.grid.storm.config.ConfigurationDefaults.PIN_LIFETIME_MAXIMUM; +import static it.grid.storm.config.ConfigurationDefaults.PTG_SCHEDULER_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTG_SCHEDULER_MAX_POOL_SIZE; +import static 
it.grid.storm.config.ConfigurationDefaults.PTG_SCHEDULER_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTG_SKIP_ACL_SETUP; +import static it.grid.storm.config.ConfigurationDefaults.PTP_SCHEDULER_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTP_SCHEDULER_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTP_SCHEDULER_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_PICKER_AGENT_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_PICKER_AGENT_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_PICKER_AGENT_MAX_FETCHED_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_SCHEDULER_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_SCHEDULER_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_SCHEDULER_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REST_SERVICES_MAX_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REST_SERVICES_MAX_THREADS; +import static it.grid.storm.config.ConfigurationDefaults.REST_SERVICES_PORT; +import static it.grid.storm.config.ConfigurationDefaults.SANITY_CHECK_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.SECURITY_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.SECURITY_TOKEN; +import static it.grid.storm.config.ConfigurationDefaults.SERVER_POOL_STATUS_CHECK_TIMEOUT; +import static it.grid.storm.config.ConfigurationDefaults.XMLRPC_MAX_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.XMLRPC_MAX_THREADS; +import static it.grid.storm.config.ConfigurationDefaults.XMLRPC_SERVER_PORT; +import static it.grid.storm.config.model.v2.StormProperties.VERSION; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +import java.io.File; +import java.io.IOException; +import 
java.net.InetAddress; + +import org.junit.Test; + +import com.google.common.collect.Lists; + +import it.grid.storm.config.converter.StormPropertiesConversionException; +import it.grid.storm.config.model.v2.OverwriteMode; +import it.grid.storm.config.model.v2.StorageType; +import it.grid.storm.namespace.model.Authority; + +public class ConfigurationTest { + + @Test + public void testLoadConfiguration() throws IOException { + + Configuration.init("src/test/resources/storm.properties"); + Configuration config = Configuration.getInstance(); + + assertEquals(VERSION, config.getVersion()); + assertFalse(config.getManagedSrmEndpoints().isEmpty()); + assertEquals(2, config.getManagedSrmEndpoints().size()); + assertEquals("storm-fe01.example", config.getManagedSrmEndpoints().get(0).getServiceHostname()); + assertEquals(8444, config.getManagedSrmEndpoints().get(0).getServicePort()); + assertEquals("storm-fe02.example", config.getManagedSrmEndpoints().get(1).getServiceHostname()); + assertEquals(8445, config.getManagedSrmEndpoints().get(1).getServicePort()); + assertEquals("storm-db.example", config.getDbHostname()); + assertEquals("test", config.getDbUsername()); + assertEquals("secret", config.getDbPassword()); + assertEquals(3308, config.getDbPort()); + assertEquals("test", config.getDbProperties()); + assertEquals(200, config.getDbPoolSize()); + assertEquals(1200, config.getDbPoolMaxWaitMillis()); + assertEquals(50, config.getDbPoolMinIdle()); + assertEquals(false, config.isDbPoolTestOnBorrow()); + assertEquals(false, config.isDbPoolTestWhileIdle()); + assertEquals(9999, config.getRestServicesPort()); + assertEquals(150, config.getRestServicesMaxThreads()); + assertEquals(1500, config.getRestServicesMaxQueueSize()); + assertEquals(9090, config.getXmlRpcServerPort()); + assertEquals(512, config.getXmlrpcMaxThreads()); + assertEquals(2000, config.getXmlrpcMaxQueueSize()); + assertEquals(true, config.isSecurityEnabled()); + assertEquals("ilovejava", 
config.getSecurityToken()); + assertEquals(true, config.isDiskUsageServiceEnabled()); + assertEquals(true, config.isDiskUsageServiceTasksParallel()); + assertEquals(120, config.getDiskUsageServiceInitialDelay()); + assertEquals(200000, config.getDiskUsageServiceTasksInterval()); + assertEquals(20, config.getInProgressAgentInitialDelay()); + assertEquals(400, config.getInProgressAgentInterval()); + assertEquals(333000, config.getInProgressPtpExpirationTime()); + assertEquals(20, config.getExpiredSpacesAgentInitialDelay()); + assertEquals(400, config.getExpiredSpacesAgentInterval()); + assertEquals(true, config.isCompletedRequestsAgentEnabled()); + assertEquals(20, config.getCompletedRequestsAgentDelay()); + assertEquals(400, config.getCompletedRequestsAgentPeriod()); + assertEquals(1800, config.getCompletedRequestsAgentPurgeSize()); + assertEquals(22200, config.getCompletedRequestsAgentPurgeAge()); + assertEquals(10, config.getCorePoolSize()); + assertEquals(50, config.getMaxPoolSize()); + assertEquals(2000, config.getQueueSize()); + assertEquals(50, config.getPtPCorePoolSize()); + assertEquals(200, config.getPtPMaxPoolSize()); + assertEquals(1000, config.getPtPQueueSize()); + assertEquals(50, config.getPtGCorePoolSize()); + assertEquals(200, config.getPtGMaxPoolSize()); + assertEquals(2000, config.getPtGQueueSize()); + assertEquals(50, config.getBoLCorePoolSize()); + assertEquals(200, config.getBoLMaxPoolSize()); + assertEquals(2000, config.getBoLQueueSize()); + assertEquals(10, config.getRequestsPickerAgentInitialDelay()); + assertEquals(20, config.getRequestsPickerAgentInterval()); + assertEquals(1000, config.getRequestsPickerAgentMaxFetchedSize()); + assertEquals(false, config.isSanityCheckEnabled()); + assertEquals("/file", config.getExtraSlashesForFileTURL()); + assertEquals("/rfio", config.getExtraSlashesForRFIOTURL()); + assertEquals("/root", config.getExtraSlashesForRootTURL()); + assertEquals("/gsiftp", config.getExtraSlashesForGsiFTPTURL()); + 
assertEquals(true, config.isLsDefaultAllLevelRecursive()); + assertEquals(2, config.getLsDefaultNumOfLevels()); + assertEquals(1, config.getLsDefaultOffset()); + assertEquals(3000, config.getLsMaxNumberOfEntry()); + assertEquals(300000, config.getPinLifetimeDefault()); + assertEquals(18000000, config.getPinLifetimeMaximum()); + assertEquals(true, config.isSkipPtgACLSetup()); + assertEquals(100000, config.getFileDefaultSize()); + assertEquals(300000, config.getFileLifetimeDefault()); + assertEquals("N", config.getDefaultOverwriteMode().name()); + assertEquals("P", config.getDefaultFileStorageType().name()); + assertEquals(true, config.isAutomaticDirectoryCreationEnabled()); + assertEquals(true, config.isDirectoryWritePermOnCreationEnabled()); + assertEquals(true, config.isHearthbeatBookkeepingEnabled()); + assertEquals(true, config.isHearthbeatPerformanceMeasuringEnabled()); + assertEquals(30, config.getHearthbeatPeriod()); + assertEquals(10, config.getHearthbeatPerformanceLogbookTimeInterval()); + assertEquals(10, config.getHearthbeatPerformanceGlanceTimeInterval()); + assertEquals(900, config.getGPFSQuotaRefreshPeriod()); + assertEquals("/", config.getHTTPTURLPrefix()); + assertEquals(20000, config.getServerPoolStatusCheckTimeout()); + assertEquals(10, config.getMaxLoop()); + assertEquals("ping-values.properties", config.getPingValuesPropertiesFilename()); + } + + @Test + public void testLoadEmptyConfiguration() throws IOException { + + Configuration.init("src/test/resources/empty.properties"); + Configuration config = Configuration.getInstance(); + String hostname = InetAddress.getLocalHost().getHostName(); + + assertEquals(VERSION, config.getVersion()); + assertFalse(config.getManagedSrmEndpoints().isEmpty()); + assertEquals(1, config.getManagedSrmEndpoints().size()); + assertEquals(hostname, config.getManagedSrmEndpoints().get(0).getServiceHostname()); + assertEquals(8444, config.getManagedSrmEndpoints().get(0).getServicePort()); + assertEquals(hostname, 
config.getDbHostname()); + assertEquals(DB_USERNAME, config.getDbUsername()); + assertEquals(DB_PASSWORD, config.getDbPassword()); + assertEquals(DB_PORT, config.getDbPort()); + assertEquals(DB_PROPERTIES, config.getDbProperties()); + assertEquals(DB_POOL_SIZE, config.getDbPoolSize()); + assertEquals(DB_POOL_MAX_WAIT_MILLIS, config.getDbPoolMaxWaitMillis()); + assertEquals(DB_POOL_MIN_IDLE, config.getDbPoolMinIdle()); + assertEquals(DB_POOL_TEST_ON_BORROW, config.isDbPoolTestOnBorrow()); + assertEquals(DB_POOL_TEST_WHILE_IDLE, config.isDbPoolTestWhileIdle()); + assertEquals(REST_SERVICES_PORT, config.getRestServicesPort()); + assertEquals(REST_SERVICES_MAX_THREADS, config.getRestServicesMaxThreads()); + assertEquals(REST_SERVICES_MAX_QUEUE_SIZE, config.getRestServicesMaxQueueSize()); + assertEquals(XMLRPC_SERVER_PORT, config.getXmlRpcServerPort()); + assertEquals(XMLRPC_MAX_THREADS, config.getXmlrpcMaxThreads()); + assertEquals(XMLRPC_MAX_QUEUE_SIZE, config.getXmlrpcMaxQueueSize()); + assertEquals(SECURITY_ENABLED, config.isSecurityEnabled()); + assertEquals(SECURITY_TOKEN, config.getSecurityToken()); + assertEquals(DISKUSAGE_SERVICE_ENABLED, config.isDiskUsageServiceEnabled()); + assertEquals(DISKUSAGE_SERVICE_PARALLEL_TASKS_ENABLED, + config.isDiskUsageServiceTasksParallel()); + assertEquals(DISKUSAGE_SERVICE_INITIAL_DELAY, config.getDiskUsageServiceInitialDelay()); + assertEquals(DISKUSAGE_SERVICE_TASKS_INTERVAL, config.getDiskUsageServiceTasksInterval()); + assertEquals(INPROGRESS_REQUESTS_AGENT_DELAY, config.getInProgressAgentInitialDelay()); + assertEquals(INPROGRESS_REQUESTS_AGENT_INTERVAL, config.getInProgressAgentInterval()); + assertEquals(INPROGRESS_REQUESTS_AGENT_PTP_EXPIRATION_TIME, + config.getInProgressPtpExpirationTime()); + assertEquals(EXPIRED_SPACES_AGENT_DELAY, config.getExpiredSpacesAgentInitialDelay()); + assertEquals(EXPIRED_SPACES_AGENT_INTERVAL, config.getExpiredSpacesAgentInterval()); + assertEquals(COMPLETED_REQUESTS_AGENT_ENABLED, 
config.isCompletedRequestsAgentEnabled()); + assertEquals(COMPLETED_REQUESTS_AGENT_DELAY, config.getCompletedRequestsAgentDelay()); + assertEquals(COMPLETED_REQUESTS_AGENT_INTERVAL, config.getCompletedRequestsAgentPeriod()); + assertEquals(COMPLETED_REQUESTS_AGENT_PURGE_SIZE, config.getCompletedRequestsAgentPurgeSize()); + assertEquals(COMPLETED_REQUESTS_AGENT_PURGE_AGE, config.getCompletedRequestsAgentPurgeAge()); + assertEquals(REQUESTS_SCHEDULER_CORE_POOL_SIZE, config.getCorePoolSize()); + assertEquals(REQUESTS_SCHEDULER_MAX_POOL_SIZE, config.getMaxPoolSize()); + assertEquals(REQUESTS_SCHEDULER_QUEUE_SIZE, config.getQueueSize()); + assertEquals(PTP_SCHEDULER_CORE_POOL_SIZE, config.getPtPCorePoolSize()); + assertEquals(PTP_SCHEDULER_MAX_POOL_SIZE, config.getPtPMaxPoolSize()); + assertEquals(PTP_SCHEDULER_QUEUE_SIZE, config.getPtPQueueSize()); + assertEquals(PTG_SCHEDULER_CORE_POOL_SIZE, config.getPtGCorePoolSize()); + assertEquals(PTG_SCHEDULER_MAX_POOL_SIZE, config.getPtGMaxPoolSize()); + assertEquals(PTG_SCHEDULER_QUEUE_SIZE, config.getPtGQueueSize()); + assertEquals(BOL_SCHEDULER_CORE_POOL_SIZE, config.getBoLCorePoolSize()); + assertEquals(BOL_SCHEDULER_MAX_POOL_SIZE, config.getBoLMaxPoolSize()); + assertEquals(BOL_SCHEDULER_QUEUE_SIZE, config.getBoLQueueSize()); + assertEquals(REQUESTS_PICKER_AGENT_DELAY, config.getRequestsPickerAgentInitialDelay()); + assertEquals(REQUESTS_PICKER_AGENT_INTERVAL, config.getRequestsPickerAgentInterval()); + assertEquals(REQUESTS_PICKER_AGENT_MAX_FETCHED_SIZE, + config.getRequestsPickerAgentMaxFetchedSize()); + assertEquals(SANITY_CHECK_ENABLED, config.isSanityCheckEnabled()); + assertEquals(EXTRA_SLASHES_FOR_FILE_TURL, config.getExtraSlashesForFileTURL()); + assertEquals(EXTRA_SLASHES_FOR_RFIO_TURL, config.getExtraSlashesForRFIOTURL()); + assertEquals(EXTRA_SLASHES_FOR_ROOT_TURL, config.getExtraSlashesForRootTURL()); + assertEquals(EXTRA_SLASHES_FOR_GSIFTP_TURL, config.getExtraSlashesForGsiFTPTURL()); + 
assertEquals(LS_DEFAULT_ALL_LEVEL_RECURSIVE, config.isLsDefaultAllLevelRecursive()); + assertEquals(LS_DEFAULT_NUM_OF_LEVELS, config.getLsDefaultNumOfLevels()); + assertEquals(LS_DEFAULT_OFFSET, config.getLsDefaultOffset()); + assertEquals(LS_MAX_NUMBER_OF_ENTRY, config.getLsMaxNumberOfEntry()); + assertEquals(PIN_LIFETIME_DEFAULT, config.getPinLifetimeDefault()); + assertEquals(PIN_LIFETIME_MAXIMUM, config.getPinLifetimeMaximum()); + assertEquals(PTG_SKIP_ACL_SETUP, config.isSkipPtgACLSetup()); + assertEquals(FILE_DEFAULT_SIZE, config.getFileDefaultSize()); + assertEquals(FILE_LIFETIME_DEFAULT, config.getFileLifetimeDefault()); + assertEquals(DEFAULT_OVERWRITE_MODE, config.getDefaultOverwriteMode().name()); + assertEquals(DEFAULT_FILE_STORAGE_TYPE, config.getDefaultFileStorageType().name()); + assertEquals(AUTOMATIC_DIRECTORY_CREATION, config.isAutomaticDirectoryCreationEnabled()); + assertEquals(ENABLE_WRITE_PERM_ON_DIRECTORY, config.isDirectoryWritePermOnCreationEnabled()); + assertEquals(BOOK_KEEPING_ENABLED, config.isHearthbeatBookkeepingEnabled()); + assertEquals(PERFORMANCE_MEASURING, config.isHearthbeatPerformanceMeasuringEnabled()); + assertEquals(HEARTHBEAT_PERIOD, config.getHearthbeatPeriod()); + assertEquals(PERFORMANCE_LOGBOOK_TIME_INTERVAL, + config.getHearthbeatPerformanceLogbookTimeInterval()); + assertEquals(PERFORMANCE_GLANCE_TIME_INTERVAL, + config.getHearthbeatPerformanceGlanceTimeInterval()); + assertEquals(GPFS_QUOTA_REFRESH_PERIOD, config.getGPFSQuotaRefreshPeriod()); + assertEquals(HTTP_TURL_PREFIX, config.getHTTPTURLPrefix()); + assertEquals(SERVER_POOL_STATUS_CHECK_TIMEOUT, config.getServerPoolStatusCheckTimeout()); + assertEquals(MAX_LOOP, config.getMaxLoop()); + assertEquals(PING_VALUES_PROPERTIES_FILENAME, config.getPingValuesPropertiesFilename()); + } + + @Test + public void testLoadedConfigurationFromOldProperties() + throws IOException, StormPropertiesConversionException { + + Configuration.init("src/test/resources/v1.properties"); + 
Configuration config = Configuration.getInstance(); + + // SRM service + assertEquals("fe.example.org", config.getSrmServiceHostname()); + assertEquals(8444, config.getSrmServicePort()); + assertEquals(config.getManagedSrmEndpoints(), + Lists.newArrayList(new Authority("fe.example.org", 8444), + new Authority("fe-01.example.org", 8444), new Authority("fe-02.example.org", 8444))); + // database + assertEquals("storm", config.getDbUsername()); + assertEquals("my-secret-password", config.getDbPassword()); + assertEquals("prop=1", config.getDbProperties()); + // not converted properties + assertEquals(DB_PORT, config.getDbPort()); + assertEquals(DB_POOL_SIZE, config.getDbPoolSize()); + assertEquals(DB_POOL_MIN_IDLE, config.getDbPoolMinIdle()); + assertEquals(DB_POOL_MAX_WAIT_MILLIS, config.getDbPoolMaxWaitMillis()); + assertEquals(DB_POOL_TEST_ON_BORROW, config.isDbPoolTestOnBorrow()); + assertEquals(DB_POOL_TEST_WHILE_IDLE, config.isDbPoolTestWhileIdle()); + // REST + assertEquals(9999, config.getRestServicesPort()); + assertEquals(512, config.getRestServicesMaxThreads()); + assertEquals(2000, config.getRestServicesMaxQueueSize()); + // sanity check + assertEquals(true, config.isSanityCheckEnabled()); + // xmlrpc + assertEquals(8081, config.getXmlRpcServerPort()); + assertEquals(512, config.getXmlrpcMaxThreads()); + assertEquals(2000, config.getXmlrpcMaxQueueSize()); + assertEquals(true, config.isSecurityEnabled()); + assertEquals("ilovejava", config.getSecurityToken()); + // disk usage + assertEquals(true, config.isDiskUsageServiceEnabled()); + assertEquals(60, config.getDiskUsageServiceInitialDelay()); + assertEquals(360, config.getDiskUsageServiceTasksInterval()); + assertEquals(true, config.isDiskUsageServiceTasksParallel()); + // + assertEquals(10, config.getExpiredSpacesAgentInitialDelay()); + assertEquals(300, config.getExpiredSpacesAgentInterval()); + // + assertEquals(2000000, config.getFileDefaultSize()); + assertEquals(300000, 
config.getFileLifetimeDefault()); + assertEquals(310000, config.getPinLifetimeDefault()); + assertEquals(1900000, config.getPinLifetimeMaximum()); + + assertEquals(15, config.getRequestsPickerAgentInitialDelay()); + assertEquals(25, config.getRequestsPickerAgentInterval()); + assertEquals(150, config.getRequestsPickerAgentMaxFetchedSize()); + // LS + assertEquals(3000, config.getLsMaxNumberOfEntry()); + assertEquals(true, config.isLsDefaultAllLevelRecursive()); + assertEquals(3, config.getLsDefaultNumOfLevels()); + assertEquals(2, config.getLsDefaultOffset()); + // + assertEquals(60, config.getPtPCorePoolSize()); + assertEquals(300, config.getPtPMaxPoolSize()); + assertEquals(2000, config.getPtPQueueSize()); + + assertEquals(70, config.getPtGCorePoolSize()); + assertEquals(400, config.getPtGMaxPoolSize()); + assertEquals(3000, config.getPtGQueueSize()); + + assertEquals(40, config.getBoLCorePoolSize()); + assertEquals(100, config.getBoLMaxPoolSize()); + assertEquals(1000, config.getBoLQueueSize()); + + assertEquals(20, config.getCorePoolSize()); + assertEquals(60, config.getMaxPoolSize()); + assertEquals(3000, config.getQueueSize()); + + assertEquals(true, config.isAutomaticDirectoryCreationEnabled()); + assertEquals(true, config.isDirectoryWritePermOnCreationEnabled()); + + assertEquals(OverwriteMode.N, config.getDefaultOverwriteMode()); + assertEquals(StorageType.P, config.getDefaultFileStorageType()); + + assertEquals(false, config.isCompletedRequestsAgentEnabled()); + assertEquals(100, config.getCompletedRequestsAgentDelay()); + assertEquals(600, config.getCompletedRequestsAgentPeriod()); + assertEquals(1000, config.getCompletedRequestsAgentPurgeSize()); + assertEquals(7200, config.getCompletedRequestsAgentPurgeAge()); + + assertEquals(60, config.getInProgressAgentInitialDelay()); + assertEquals(600, config.getInProgressAgentInterval()); + assertEquals(7000, config.getInProgressPtpExpirationTime()); + + assertEquals("/file", 
config.getExtraSlashesForFileTURL()); + assertEquals("/rfio", config.getExtraSlashesForRFIOTURL()); + assertEquals("/gsiftp", config.getExtraSlashesForGsiFTPTURL()); + assertEquals("/root", config.getExtraSlashesForRootTURL()); + + assertEquals("ping-values.properties", config.getPingValuesPropertiesFilename()); + + assertEquals(30, config.getHearthbeatPeriod()); + assertEquals(10, config.getHearthbeatPerformanceGlanceTimeInterval()); + assertEquals(10, config.getHearthbeatPerformanceLogbookTimeInterval()); + assertEquals(true, config.isHearthbeatBookkeepingEnabled()); + assertEquals(true, config.isHearthbeatPerformanceMeasuringEnabled()); + + assertEquals(10, config.getMaxLoop()); + + assertEquals(900, config.getGPFSQuotaRefreshPeriod()); + + assertEquals(20000, config.getServerPoolStatusCheckTimeout()); + + assertEquals(false, config.isSkipPtgACLSetup()); + + assertEquals("/", config.getHTTPTURLPrefix()); + + // check new file created + File exported = new File("src/test/resources/v1.properties.new"); + assertEquals(exported.exists(), true); + // clear file + exported.delete(); + + } + +} diff --git a/src/test/java/it/grid/storm/config/model/StormPropertiesTest.java b/src/test/java/it/grid/storm/config/model/StormPropertiesTest.java new file mode 100644 index 000000000..7ef8e284c --- /dev/null +++ b/src/test/java/it/grid/storm/config/model/StormPropertiesTest.java @@ -0,0 +1,343 @@ +package it.grid.storm.config.model; + +import static it.grid.storm.config.ConfigurationDefaults.AUTOMATIC_DIRECTORY_CREATION; +import static it.grid.storm.config.ConfigurationDefaults.BOL_SCHEDULER_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.BOL_SCHEDULER_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.BOL_SCHEDULER_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.BOOK_KEEPING_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_DELAY; +import static 
it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_PURGE_AGE; +import static it.grid.storm.config.ConfigurationDefaults.COMPLETED_REQUESTS_AGENT_PURGE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.DB_PASSWORD; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_MAX_WAIT_MILLIS; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_MIN_IDLE; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_TEST_ON_BORROW; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_TEST_WHILE_IDLE; +import static it.grid.storm.config.ConfigurationDefaults.DB_PORT; +import static it.grid.storm.config.ConfigurationDefaults.DB_PROPERTIES; +import static it.grid.storm.config.ConfigurationDefaults.DB_USERNAME; +import static it.grid.storm.config.ConfigurationDefaults.DEFAULT_FILE_STORAGE_TYPE; +import static it.grid.storm.config.ConfigurationDefaults.DEFAULT_OVERWRITE_MODE; +import static it.grid.storm.config.ConfigurationDefaults.DEFAULT_QUALITY_LEVEL; +import static it.grid.storm.config.ConfigurationDefaults.DEFAULT_SITENAME; +import static it.grid.storm.config.ConfigurationDefaults.DISKUSAGE_SERVICE_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.DISKUSAGE_SERVICE_INITIAL_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.DISKUSAGE_SERVICE_PARALLEL_TASKS_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.DISKUSAGE_SERVICE_TASKS_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.ENABLE_WRITE_PERM_ON_DIRECTORY; +import static it.grid.storm.config.ConfigurationDefaults.EXPIRED_SPACES_AGENT_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.EXPIRED_SPACES_AGENT_INTERVAL; 
+import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_FILE_TURL; +import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_GSIFTP_TURL; +import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_RFIO_TURL; +import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_ROOT_TURL; +import static it.grid.storm.config.ConfigurationDefaults.FILE_DEFAULT_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.FILE_LIFETIME_DEFAULT; +import static it.grid.storm.config.ConfigurationDefaults.GPFS_QUOTA_REFRESH_PERIOD; +import static it.grid.storm.config.ConfigurationDefaults.HEARTHBEAT_PERIOD; +import static it.grid.storm.config.ConfigurationDefaults.HTTP_TURL_PREFIX; +import static it.grid.storm.config.ConfigurationDefaults.INPROGRESS_REQUESTS_AGENT_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.INPROGRESS_REQUESTS_AGENT_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.INPROGRESS_REQUESTS_AGENT_PTP_EXPIRATION_TIME; +import static it.grid.storm.config.ConfigurationDefaults.LS_DEFAULT_ALL_LEVEL_RECURSIVE; +import static it.grid.storm.config.ConfigurationDefaults.LS_DEFAULT_NUM_OF_LEVELS; +import static it.grid.storm.config.ConfigurationDefaults.LS_DEFAULT_OFFSET; +import static it.grid.storm.config.ConfigurationDefaults.LS_MAX_NUMBER_OF_ENTRY; +import static it.grid.storm.config.ConfigurationDefaults.MAX_LOOP; +import static it.grid.storm.config.ConfigurationDefaults.PERFORMANCE_GLANCE_TIME_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.PERFORMANCE_LOGBOOK_TIME_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.PERFORMANCE_MEASURING; +import static it.grid.storm.config.ConfigurationDefaults.PING_VALUES_PROPERTIES_FILENAME; +import static it.grid.storm.config.ConfigurationDefaults.PIN_LIFETIME_DEFAULT; +import static it.grid.storm.config.ConfigurationDefaults.PIN_LIFETIME_MAXIMUM; +import static 
it.grid.storm.config.ConfigurationDefaults.PTG_SCHEDULER_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTG_SCHEDULER_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTG_SCHEDULER_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTG_SKIP_ACL_SETUP; +import static it.grid.storm.config.ConfigurationDefaults.PTP_SCHEDULER_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTP_SCHEDULER_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTP_SCHEDULER_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_PICKER_AGENT_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_PICKER_AGENT_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_PICKER_AGENT_MAX_FETCHED_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_SCHEDULER_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_SCHEDULER_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REQUESTS_SCHEDULER_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REST_SERVICES_MAX_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REST_SERVICES_MAX_THREADS; +import static it.grid.storm.config.ConfigurationDefaults.REST_SERVICES_PORT; +import static it.grid.storm.config.ConfigurationDefaults.SANITY_CHECK_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.SECURITY_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.SECURITY_TOKEN; +import static it.grid.storm.config.ConfigurationDefaults.SERVER_POOL_STATUS_CHECK_TIMEOUT; +import static it.grid.storm.config.ConfigurationDefaults.XMLRPC_MAX_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.XMLRPC_MAX_THREADS; +import static it.grid.storm.config.ConfigurationDefaults.XMLRPC_SERVER_PORT; +import static 
it.grid.storm.config.model.v2.StormProperties.VERSION; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; +import java.net.InetAddress; + +import org.junit.Before; +import org.junit.Test; + +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.databind.JsonMappingException; +import com.fasterxml.jackson.databind.MapperFeature; +import com.fasterxml.jackson.dataformat.javaprop.JavaPropsMapper; + +import it.grid.storm.config.model.v2.QualityLevel; +import it.grid.storm.config.model.v2.StormProperties; + +public class StormPropertiesTest { + + private JavaPropsMapper mapper; + private ClassLoader classLoader; + + @Before + public void init() { + + mapper = new JavaPropsMapper(); + mapper.enable(MapperFeature.ACCEPT_CASE_INSENSITIVE_ENUMS); + classLoader = getClass().getClassLoader(); + } + + @Test + public void testLoadingConfigurationFromFullPropertiesV2() + throws JsonParseException, JsonMappingException, IOException { + + File file = new File(classLoader.getResource("storm.properties").getFile()); + StormProperties properties = mapper.readValue(file, StormProperties.class); + System.out.println(properties); + assertEquals(StormProperties.VERSION, properties.getVersion()); + assertFalse(properties.getSrmEndpoints().isEmpty()); + assertEquals(2, properties.getSrmEndpoints().size()); + assertEquals("storm-fe01.example", properties.getSrmEndpoints().get(0).getHost()); + assertEquals(8444, properties.getSrmEndpoints().get(0).getPort()); + assertEquals("storm-fe02.example", properties.getSrmEndpoints().get(1).getHost()); + assertEquals(8445, properties.getSrmEndpoints().get(1).getPort()); + assertEquals("storm-db.example", properties.getDb().getHostname()); + assertEquals("test", properties.getDb().getUsername()); + assertEquals("secret", 
properties.getDb().getPassword()); + assertEquals(3308, properties.getDb().getPort()); + assertEquals("test", properties.getDb().getProperties()); + assertEquals(200, properties.getDb().getPool().getSize()); + assertEquals(1200, properties.getDb().getPool().getMaxWaitMillis()); + assertEquals(false, properties.getDb().getPool().isTestOnBorrow()); + assertEquals(false, properties.getDb().getPool().isTestWhileIdle()); + assertEquals(9999, properties.getRest().getPort()); + assertEquals(150, properties.getRest().getMaxThreads()); + assertEquals(1500, properties.getRest().getMaxQueueSize()); + assertEquals(9090, properties.getXmlrpc().getPort()); + assertEquals(512, properties.getXmlrpc().getMaxThreads()); + assertEquals(2000, properties.getXmlrpc().getMaxQueueSize()); + assertEquals(true, properties.getSecurity().isEnabled()); + assertEquals("ilovejava", properties.getSecurity().getToken()); + assertEquals(true, properties.getDu().isEnabled()); + assertEquals(true, properties.getDu().isParallelTasksEnabled()); + assertEquals(120, properties.getDu().getInitialDelay()); + assertEquals(200000, properties.getDu().getTasksInterval()); + assertEquals(20, properties.getInprogressRequestsAgent().getDelay()); + assertEquals(400, properties.getInprogressRequestsAgent().getInterval()); + assertEquals(333000, properties.getInprogressRequestsAgent().getPtpExpirationTime()); + assertEquals(20, properties.getExpiredSpacesAgent().getDelay()); + assertEquals(400, properties.getExpiredSpacesAgent().getInterval()); + assertEquals(true, properties.getCompletedRequestsAgent().isEnabled()); + assertEquals(20, properties.getCompletedRequestsAgent().getDelay()); + assertEquals(400, properties.getCompletedRequestsAgent().getInterval()); + assertEquals(1800, properties.getCompletedRequestsAgent().getPurgeSize()); + assertEquals(22200, properties.getCompletedRequestsAgent().getPurgeAge()); + assertEquals(10, properties.getRequestsScheduler().getCorePoolSize()); + assertEquals(50, 
properties.getRequestsScheduler().getMaxPoolSize()); + assertEquals(2000, properties.getRequestsScheduler().getQueueSize()); + assertEquals(50, properties.getPtpScheduler().getCorePoolSize()); + assertEquals(200, properties.getPtpScheduler().getMaxPoolSize()); + assertEquals(1000, properties.getPtpScheduler().getQueueSize()); + assertEquals(50, properties.getPtgScheduler().getCorePoolSize()); + assertEquals(200, properties.getPtgScheduler().getMaxPoolSize()); + assertEquals(2000, properties.getPtgScheduler().getQueueSize()); + assertEquals(50, properties.getBolScheduler().getCorePoolSize()); + assertEquals(200, properties.getBolScheduler().getMaxPoolSize()); + assertEquals(2000, properties.getBolScheduler().getQueueSize()); + assertEquals(10, properties.getRequestsPickerAgent().getDelay()); + assertEquals(20, properties.getRequestsPickerAgent().getInterval()); + assertEquals(1000, properties.getRequestsPickerAgent().getMaxFetchedSize()); + assertEquals(false, properties.isSanityChecksEnabled()); + assertEquals("/file", properties.getExtraslashes().getFile()); + assertEquals("/rfio", properties.getExtraslashes().getRfio()); + assertEquals("/root", properties.getExtraslashes().getRoot()); + assertEquals("/gsiftp", properties.getExtraslashes().getGsiftp()); + assertEquals(true, properties.getSynchLs().isDefaultAllLevelRecursive()); + assertEquals(2, properties.getSynchLs().getDefaultNumLevels()); + assertEquals(1, properties.getSynchLs().getDefaultOffset()); + assertEquals(3000, properties.getSynchLs().getMaxEntries()); + assertEquals(300000, properties.getPinlifetime().getDefaultValue()); + assertEquals(18000000, properties.getPinlifetime().getMaximum()); + assertEquals(true, properties.isSkipPtgAclSetup()); + assertEquals(100000, properties.getFiles().getDefaultSize()); + assertEquals(300000, properties.getFiles().getDefaultLifetime()); + assertEquals("N", properties.getFiles().getDefaultOverwrite()); + assertEquals("P", 
properties.getFiles().getDefaultStoragetype()); + assertEquals(true, properties.getDirectories().isEnableAutomaticCreation()); + assertEquals(true, properties.getDirectories().isEnableWritepermOnCreation()); + assertEquals(true, properties.getHearthbeat().isBookkeepingEnabled()); + assertEquals(true, properties.getHearthbeat().isPerformanceMeasuringEnabled()); + assertEquals(30, properties.getHearthbeat().getPeriod()); + assertEquals(10, properties.getHearthbeat().getPerformanceLogbookTimeInterval()); + assertEquals(10, properties.getHearthbeat().getPerformanceGlanceTimeInterval()); + assertEquals(900, properties.getInfoQuotaRefreshPeriod()); + assertEquals("/", properties.getHttpTurlPrefix()); + assertEquals(20000, properties.getServerPoolStatusCheckTimeout()); + assertEquals(10, properties.getAbortMaxloop()); + assertEquals("ping-values.properties", properties.getPingPropertiesFilename()); + assertEquals("StoRM test", properties.getSite().getName()); + assertEquals(QualityLevel.PRODUCTION, properties.getSite().getQualityLevel()); + } + + @Test + public void testDefaultConfigurationStartingFromEmptyFile() + throws JsonParseException, JsonMappingException, IOException { + + String hostname = InetAddress.getLocalHost().getHostName(); + File file = new File(classLoader.getResource("empty.properties").getFile()); + StormProperties properties = mapper.readValue(file, StormProperties.class); + assertEquals(VERSION, properties.getVersion()); + assertFalse(properties.getSrmEndpoints().isEmpty()); + assertEquals(1, properties.getSrmEndpoints().size()); + assertEquals(hostname, properties.getSrmEndpoints().get(0).getHost()); + assertEquals(8444, properties.getSrmEndpoints().get(0).getPort()); + assertEquals(hostname, properties.getDb().getHostname()); + assertEquals(DB_USERNAME, properties.getDb().getUsername()); + assertEquals(DB_PASSWORD, properties.getDb().getPassword()); + assertEquals(DB_PORT, properties.getDb().getPort()); + assertEquals(DB_PROPERTIES, 
properties.getDb().getProperties()); + assertEquals(DB_POOL_SIZE, properties.getDb().getPool().getSize()); + assertEquals(DB_POOL_MAX_WAIT_MILLIS, properties.getDb().getPool().getMaxWaitMillis()); + assertEquals(DB_POOL_MIN_IDLE, properties.getDb().getPool().getMinIdle()); + assertEquals(DB_POOL_TEST_ON_BORROW, properties.getDb().getPool().isTestOnBorrow()); + assertEquals(DB_POOL_TEST_WHILE_IDLE, properties.getDb().getPool().isTestWhileIdle()); + assertEquals(REST_SERVICES_PORT, properties.getRest().getPort()); + assertEquals(REST_SERVICES_MAX_THREADS, properties.getRest().getMaxThreads()); + assertEquals(REST_SERVICES_MAX_QUEUE_SIZE, properties.getRest().getMaxQueueSize()); + assertEquals(XMLRPC_SERVER_PORT, properties.getXmlrpc().getPort()); + assertEquals(XMLRPC_MAX_THREADS, properties.getXmlrpc().getMaxThreads()); + assertEquals(XMLRPC_MAX_QUEUE_SIZE, properties.getXmlrpc().getMaxQueueSize()); + assertEquals(SECURITY_ENABLED, properties.getSecurity().isEnabled()); + assertEquals(SECURITY_TOKEN, properties.getSecurity().getToken()); + assertEquals(DISKUSAGE_SERVICE_ENABLED, properties.getDu().isEnabled()); + assertEquals(DISKUSAGE_SERVICE_PARALLEL_TASKS_ENABLED, + properties.getDu().isParallelTasksEnabled()); + assertEquals(DISKUSAGE_SERVICE_INITIAL_DELAY, properties.getDu().getInitialDelay()); + assertEquals(DISKUSAGE_SERVICE_TASKS_INTERVAL, properties.getDu().getTasksInterval()); + assertEquals(INPROGRESS_REQUESTS_AGENT_DELAY, + properties.getInprogressRequestsAgent().getDelay()); + assertEquals(INPROGRESS_REQUESTS_AGENT_INTERVAL, + properties.getInprogressRequestsAgent().getInterval()); + assertEquals(INPROGRESS_REQUESTS_AGENT_PTP_EXPIRATION_TIME, + properties.getInprogressRequestsAgent().getPtpExpirationTime()); + assertEquals(EXPIRED_SPACES_AGENT_DELAY, properties.getExpiredSpacesAgent().getDelay()); + assertEquals(EXPIRED_SPACES_AGENT_INTERVAL, properties.getExpiredSpacesAgent().getInterval()); + assertEquals(COMPLETED_REQUESTS_AGENT_ENABLED, + 
properties.getCompletedRequestsAgent().isEnabled()); + assertEquals(COMPLETED_REQUESTS_AGENT_DELAY, properties.getCompletedRequestsAgent().getDelay()); + assertEquals(COMPLETED_REQUESTS_AGENT_INTERVAL, + properties.getCompletedRequestsAgent().getInterval()); + assertEquals(COMPLETED_REQUESTS_AGENT_PURGE_SIZE, + properties.getCompletedRequestsAgent().getPurgeSize()); + assertEquals(COMPLETED_REQUESTS_AGENT_PURGE_AGE, + properties.getCompletedRequestsAgent().getPurgeAge()); + assertEquals(REQUESTS_SCHEDULER_CORE_POOL_SIZE, + properties.getRequestsScheduler().getCorePoolSize()); + assertEquals(REQUESTS_SCHEDULER_MAX_POOL_SIZE, + properties.getRequestsScheduler().getMaxPoolSize()); + assertEquals(REQUESTS_SCHEDULER_QUEUE_SIZE, properties.getRequestsScheduler().getQueueSize()); + assertEquals(PTP_SCHEDULER_CORE_POOL_SIZE, properties.getPtpScheduler().getCorePoolSize()); + assertEquals(PTP_SCHEDULER_MAX_POOL_SIZE, properties.getPtpScheduler().getMaxPoolSize()); + assertEquals(PTP_SCHEDULER_QUEUE_SIZE, properties.getPtpScheduler().getQueueSize()); + assertEquals(PTG_SCHEDULER_CORE_POOL_SIZE, properties.getPtgScheduler().getCorePoolSize()); + assertEquals(PTG_SCHEDULER_MAX_POOL_SIZE, properties.getPtgScheduler().getMaxPoolSize()); + assertEquals(PTG_SCHEDULER_QUEUE_SIZE, properties.getPtgScheduler().getQueueSize()); + assertEquals(BOL_SCHEDULER_CORE_POOL_SIZE, properties.getBolScheduler().getCorePoolSize()); + assertEquals(BOL_SCHEDULER_MAX_POOL_SIZE, properties.getBolScheduler().getMaxPoolSize()); + assertEquals(BOL_SCHEDULER_QUEUE_SIZE, properties.getBolScheduler().getQueueSize()); + assertEquals(REQUESTS_PICKER_AGENT_DELAY, properties.getRequestsPickerAgent().getDelay()); + assertEquals(REQUESTS_PICKER_AGENT_INTERVAL, properties.getRequestsPickerAgent().getInterval()); + assertEquals(REQUESTS_PICKER_AGENT_MAX_FETCHED_SIZE, + properties.getRequestsPickerAgent().getMaxFetchedSize()); + assertEquals(SANITY_CHECK_ENABLED, properties.isSanityChecksEnabled()); + 
assertEquals(EXTRA_SLASHES_FOR_FILE_TURL, properties.getExtraslashes().getFile()); + assertEquals(EXTRA_SLASHES_FOR_RFIO_TURL, properties.getExtraslashes().getRfio()); + assertEquals(EXTRA_SLASHES_FOR_ROOT_TURL, properties.getExtraslashes().getRoot()); + assertEquals(EXTRA_SLASHES_FOR_GSIFTP_TURL, properties.getExtraslashes().getGsiftp()); + assertEquals(LS_DEFAULT_ALL_LEVEL_RECURSIVE, + properties.getSynchLs().isDefaultAllLevelRecursive()); + assertEquals(LS_DEFAULT_NUM_OF_LEVELS, properties.getSynchLs().getDefaultNumLevels()); + assertEquals(LS_DEFAULT_OFFSET, properties.getSynchLs().getDefaultOffset()); + assertEquals(LS_MAX_NUMBER_OF_ENTRY, properties.getSynchLs().getMaxEntries()); + assertEquals(PIN_LIFETIME_DEFAULT, properties.getPinlifetime().getDefaultValue()); + assertEquals(PIN_LIFETIME_MAXIMUM, properties.getPinlifetime().getMaximum()); + assertEquals(PTG_SKIP_ACL_SETUP, properties.isSkipPtgAclSetup()); + assertEquals(FILE_DEFAULT_SIZE, properties.getFiles().getDefaultSize()); + assertEquals(FILE_LIFETIME_DEFAULT, properties.getFiles().getDefaultLifetime()); + assertEquals(DEFAULT_OVERWRITE_MODE, properties.getFiles().getDefaultOverwrite()); + assertEquals(DEFAULT_FILE_STORAGE_TYPE, properties.getFiles().getDefaultStoragetype()); + assertEquals(AUTOMATIC_DIRECTORY_CREATION, + properties.getDirectories().isEnableAutomaticCreation()); + assertEquals(ENABLE_WRITE_PERM_ON_DIRECTORY, + properties.getDirectories().isEnableWritepermOnCreation()); + assertEquals(BOOK_KEEPING_ENABLED, properties.getHearthbeat().isBookkeepingEnabled()); + assertEquals(PERFORMANCE_MEASURING, properties.getHearthbeat().isPerformanceMeasuringEnabled()); + assertEquals(HEARTHBEAT_PERIOD, properties.getHearthbeat().getPeriod()); + assertEquals(PERFORMANCE_LOGBOOK_TIME_INTERVAL, + properties.getHearthbeat().getPerformanceLogbookTimeInterval()); + assertEquals(PERFORMANCE_GLANCE_TIME_INTERVAL, + properties.getHearthbeat().getPerformanceGlanceTimeInterval()); + 
assertEquals(GPFS_QUOTA_REFRESH_PERIOD, properties.getInfoQuotaRefreshPeriod()); + assertEquals(HTTP_TURL_PREFIX, properties.getHttpTurlPrefix()); + assertEquals(SERVER_POOL_STATUS_CHECK_TIMEOUT, properties.getServerPoolStatusCheckTimeout()); + assertEquals(MAX_LOOP, properties.getAbortMaxloop()); + assertEquals(PING_VALUES_PROPERTIES_FILENAME, properties.getPingPropertiesFilename()); + assertEquals(DEFAULT_SITENAME, properties.getSite().getName()); + assertEquals(DEFAULT_QUALITY_LEVEL, properties.getSite().getQualityLevel()); + } + + @Test(expected = JsonMappingException.class) + public void testNewConfigurationVersionOverOldFile() + throws JsonParseException, JsonMappingException, IOException { + + JavaPropsMapper mapper = new JavaPropsMapper(); + ClassLoader classLoader = getClass().getClassLoader(); + File file = new File(classLoader.getResource("v1.properties").getFile()); + mapper.readValue(file, StormProperties.class); + } + + @Test + public void testSrmEndpointsWithoutHostname() + throws JsonParseException, JsonMappingException, IOException { + + final String TEST_FILE = "/tmp/test-srm-endpoints.properties"; + PrintWriter writer = new PrintWriter(TEST_FILE, "UTF-8"); + writer.println("srm_endpoints[0].port: 8444"); + writer.close(); + JavaPropsMapper mapper = new JavaPropsMapper(); + File file = new File(TEST_FILE); + try { + mapper.readValue(file, StormProperties.class); + fail("Expected JsonMappingException"); + } catch (JsonMappingException e) { + assertTrue( + e.getMessage().indexOf("Missing required creator property 'host' (index 0)") != -1); + } finally { + file.delete(); + } + } +} diff --git a/src/test/java/it/grid/storm/namespace/model/NamespaceTest.java b/src/test/java/it/grid/storm/namespace/model/NamespaceTest.java new file mode 100644 index 000000000..4bf88c5b5 --- /dev/null +++ b/src/test/java/it/grid/storm/namespace/model/NamespaceTest.java @@ -0,0 +1,56 @@ +package it.grid.storm.namespace.model; + +import static 
org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.util.Set; + +import javax.xml.parsers.ParserConfigurationException; + +import org.apache.commons.configuration.ConfigurationException; +import org.junit.Before; +import org.junit.Test; +import org.w3c.dom.DOMException; +import org.xml.sax.SAXException; + +import it.grid.storm.config.Configuration; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.NamespaceException; + +public class NamespaceTest { + + @Before + public void init() throws DOMException, ConfigurationException, + ParserConfigurationException, SAXException, IOException, NamespaceException { + ClassLoader classLoader = getClass().getClassLoader(); + File configFile = new File(classLoader.getResource("storm.properties").getFile()); + Configuration.init(configFile.getAbsolutePath()); + File namespaceFile = new File(classLoader.getResource("namespace.xml").getFile()); + Namespace.init(namespaceFile.getAbsolutePath(), false); + } + + @Test + public void checkGridftpEndpoints() { + Namespace ns = Namespace.getInstance(); + Set gridftpEndpoints = ns.getManagedEndpoints(Protocol.GSIFTP); + System.out.println(gridftpEndpoints); + assertEquals(2, gridftpEndpoints.size()); + assertTrue(gridftpEndpoints.contains(new Authority("gridftp01.cnaf.infn.it", 2811))); + assertTrue(gridftpEndpoints.contains(new Authority("gridftp02.cnaf.infn.it", 2811))); + } + + @Test + public void checkDavEndpoints() { + Namespace ns = Namespace.getInstance(); + Set davEndpoints = ns.getManagedEndpoints(Protocol.HTTPS); + davEndpoints.addAll(ns.getManagedEndpoints(Protocol.HTTP)); + System.out.println(davEndpoints); + assertEquals(4, davEndpoints.size()); + assertTrue(davEndpoints.contains(new Authority("dav01.cnaf.infn.it", 8443))); + assertTrue(davEndpoints.contains(new Authority("dav02.cnaf.infn.it", 8443))); + assertTrue(davEndpoints.contains(new 
Authority("dav01.cnaf.infn.it", 8085))); + assertTrue(davEndpoints.contains(new Authority("dav02.cnaf.infn.it", 8085))); + } +} diff --git a/src/test/java/it/grid/storm/namespace/model/SAInfoTest.java b/src/test/java/it/grid/storm/namespace/model/SAInfoTest.java index c9393a953..c0adeb935 100644 --- a/src/test/java/it/grid/storm/namespace/model/SAInfoTest.java +++ b/src/test/java/it/grid/storm/namespace/model/SAInfoTest.java @@ -10,16 +10,16 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.Lists; -import it.grid.storm.namespace.remote.Constants.HttpPerms; +import it.grid.storm.rest.info.storageareas.model.HttpPerms; +import it.grid.storm.rest.info.storageareas.model.SAInfo; public class SAInfoTest { private static final Logger log = LoggerFactory.getLogger(SAInfoTest.class); private static final String JSON_STRING = - "{\"name\":\"test.vo\",\"token\":\"TESTVO_TOKEN\",\"voname\":\"test.vo\",\"root\":\"/storage/test.vo\",\"storageclass\":\"T1D0\",\"stfnRoot\":[\"/test.vo\"],\"retentionPolicy\":\"CUSTODIAL\",\"accessLatency\":\"ONLINE\",\"protocols\":[\"xroot\",\"webdav\"],\"anonymous\":\"NOREAD\",\"availableNearlineSpace\":20000000,\"approachableRules\":[\"Fake-DN-Matching-Rule\"]}"; + "{\"name\":\"test.vo\",\"token\":\"TESTVO_TOKEN\",\"vos\":[\"test.vo\"],\"rootPath\":\"/storage/test.vo\",\"storageClass\":\"T1D0\",\"accessPoints\":[\"/test.vo\"],\"retentionPolicy\":\"custodial\",\"accessLatency\":\"online\",\"protocols\":[\"xroot\",\"webdav\"],\"anonymous\":\"NOREAD\",\"availableNearlineSpace\":20000000,\"approachableRules\":[\"Fake-DN-Matching-Rule\"]}"; private static final SAInfo saInfo; @@ -27,16 +27,17 @@ public class SAInfoTest { saInfo = new SAInfo(); saInfo.setName("test.vo"); saInfo.setToken("TESTVO_TOKEN"); - saInfo.setVoname("test.vo"); + saInfo.addVo("test.vo"); saInfo.setRoot("/storage/test.vo"); - saInfo.setStorageclass("T1D0"); - 
saInfo.setStfnRoot(Lists.newArrayList("/test.vo")); - saInfo.setRetentionPolicy(RetentionPolicy.CUSTODIAL.getRetentionPolicyName()); - saInfo.setAccessLatency(AccessLatency.ONLINE.getAccessLatencyName()); - saInfo.setProtocols(Lists.newArrayList("xroot", "webdav")); + saInfo.setStorageClass(StorageClassType.T1D0); + saInfo.addAccessPoint("/test.vo"); + saInfo.setRetentionPolicy(RetentionPolicy.custodial); + saInfo.setAccessLatency(AccessLatency.online); + saInfo.addProtocol("xroot"); + saInfo.addProtocol("webdav"); saInfo.setAnonymous(HttpPerms.NOREAD); saInfo.setAvailableNearlineSpace(20000000); - saInfo.setApproachableRules(Lists.newArrayList("Fake-DN-Matching-Rule")); + saInfo.addApproachableRule("Fake-DN-Matching-Rule"); } @Test @@ -56,6 +57,4 @@ public void testRead() throws IOException { assertEquals(saInfoRead.getToken(), saInfo.getToken()); } - - } diff --git a/src/test/java/it/grid/storm/persistence/converter/OverwriteModeConverterTest.java b/src/test/java/it/grid/storm/persistence/converter/OverwriteModeConverterTest.java new file mode 100644 index 000000000..0282ec001 --- /dev/null +++ b/src/test/java/it/grid/storm/persistence/converter/OverwriteModeConverterTest.java @@ -0,0 +1,30 @@ +package it.grid.storm.persistence.converter; + +import static org.junit.Assert.assertEquals; + +import org.junit.Test; + +import it.grid.storm.config.model.v2.OverwriteMode; +import it.grid.storm.srm.types.TOverwriteMode; + +public class OverwriteModeConverterTest { + + @Test + public void ConvertFromDb() { + + assertEquals(TOverwriteMode.ALWAYS, OverwriteModeConverter.toSTORM(OverwriteMode.A)); + assertEquals(TOverwriteMode.NEVER, OverwriteModeConverter.toSTORM(OverwriteMode.N)); + assertEquals(TOverwriteMode.WHENFILESAREDIFFERENT, OverwriteModeConverter.toSTORM(OverwriteMode.D)); + } + + @Test + public void ConvertToDb() { + + assertEquals(OverwriteMode.A, OverwriteModeConverter.toDB(TOverwriteMode.ALWAYS)); + assertEquals(OverwriteMode.N, 
OverwriteModeConverter.toDB(TOverwriteMode.NEVER)); + assertEquals(OverwriteMode.D, OverwriteModeConverter.toDB(TOverwriteMode.WHENFILESAREDIFFERENT)); + assertEquals(OverwriteMode.A.name(), "A"); + assertEquals(OverwriteMode.N.name(), "N"); + assertEquals(OverwriteMode.D.name(), "D"); + } +} diff --git a/src/test/java/it/grid/storm/rest/auth/RestTokenFilterTest.java b/src/test/java/it/grid/storm/rest/auth/RestTokenFilterTest.java index e3c4b6b2b..306dcfa9b 100644 --- a/src/test/java/it/grid/storm/rest/auth/RestTokenFilterTest.java +++ b/src/test/java/it/grid/storm/rest/auth/RestTokenFilterTest.java @@ -20,9 +20,12 @@ import org.eclipse.jetty.servlet.FilterHolder; import org.junit.After; +import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import it.grid.storm.config.Configuration; + public class RestTokenFilterTest { private final static String TOKEN = "abracadabra"; @@ -55,6 +58,12 @@ private RestTokenFilter getRestTokenFilter(String token) throws Exception { return (RestTokenFilter) filterHolder.getFilter(); } + @Before + public void init() throws IOException { + + Configuration.init("src/test/resources/storm.properties"); + } + @Test public void testMisconfiguration() throws Exception { diff --git a/src/test/java/it/grid/storm/rest/info/EndpointResourceTest.java b/src/test/java/it/grid/storm/rest/info/EndpointResourceTest.java new file mode 100644 index 000000000..86f5d17f3 --- /dev/null +++ b/src/test/java/it/grid/storm/rest/info/EndpointResourceTest.java @@ -0,0 +1,97 @@ +package it.grid.storm.rest.info; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; + +import javax.xml.parsers.ParserConfigurationException; + +import org.apache.commons.configuration.ConfigurationException; +import org.junit.Before; +import org.junit.Test; +import org.w3c.dom.DOMException; +import org.xml.sax.SAXException; + +import 
com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; + +import it.grid.storm.config.Configuration; +import it.grid.storm.config.model.v2.QualityLevel; +import it.grid.storm.config.model.v2.SrmEndpoint; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.model.AccessLatency; +import it.grid.storm.namespace.model.Authority; +import it.grid.storm.namespace.model.RetentionPolicy; +import it.grid.storm.namespace.model.StorageClassType; +import it.grid.storm.rest.info.endpoint.EndpointResource; +import it.grid.storm.rest.info.endpoint.model.EndpointInfo; +import it.grid.storm.rest.info.storageareas.model.HttpPerms; +import it.grid.storm.rest.info.storageareas.model.SAInfo; +import jersey.repackaged.com.google.common.collect.Lists; + +public class EndpointResourceTest { + + @Before + public void init() throws DOMException, ConfigurationException, ParserConfigurationException, + SAXException, IOException, NamespaceException { + ClassLoader classLoader = getClass().getClassLoader(); + File configFile = new File(classLoader.getResource("storm.properties").getFile()); + Configuration.init(configFile.getAbsolutePath()); + File namespaceFile = new File(classLoader.getResource("namespace.xml").getFile()); + Namespace.init(namespaceFile.getAbsolutePath(), false); + } + + @Test + public void getInfoTest() throws JsonProcessingException { + EndpointResource er = + new EndpointResource(Configuration.getInstance(), Namespace.getInstance()); + EndpointInfo info = er.getEndpointInfo(); + assertEquals("StoRM test", info.getSiteName()); + assertEquals(QualityLevel.PRODUCTION, info.getQualityLevel()); + assertEquals(2, info.getSrmEndpoints().size()); + assertTrue(info.getSrmEndpoints().contains(new SrmEndpoint("storm-fe01.example", 8444))); + assertTrue(info.getSrmEndpoints().contains(new SrmEndpoint("storm-fe02.example", 8445))); + assertEquals(2, 
info.getGridftpEndpoints().size()); + assertTrue(info.getGridftpEndpoints().contains(new Authority("gridftp01.cnaf.infn.it", 2811))); + assertTrue(info.getGridftpEndpoints().contains(new Authority("gridftp02.cnaf.infn.it", 2811))); + assertEquals(4, info.getDavEndpoints().size()); + assertTrue(info.getDavEndpoints().contains(new Authority("dav01.cnaf.infn.it", 8443))); + assertTrue(info.getDavEndpoints().contains(new Authority("dav02.cnaf.infn.it", 8443))); + assertTrue(info.getDavEndpoints().contains(new Authority("dav01.cnaf.infn.it", 8085))); + assertTrue(info.getDavEndpoints().contains(new Authority("dav02.cnaf.infn.it", 8085))); + assertEquals(2, info.getVos().size()); + assertTrue(info.getVos().contains("test.vo")); + assertTrue(info.getVos().contains("test.vo.2")); + assertEquals(7, info.getStorageAreas().size()); + assertTrue(info.getStorageAreas().keySet().contains("TESTVO-FS")); + assertTrue(info.getStorageAreas().keySet().contains("TESTVO2-FS")); + assertTrue(info.getStorageAreas().keySet().contains("TESTVOBIS-FS")); + assertTrue(info.getStorageAreas().keySet().contains("NOAUTH-FS")); + assertTrue(info.getStorageAreas().keySet().contains("TAPE-FS")); + assertTrue(info.getStorageAreas().keySet().contains("NESTED-FS")); + SAInfo sa = info.getStorageAreas().get("TESTVO-FS"); + assertEquals("TESTVO-FS", sa.getName()); + assertEquals("TESTVO_TOKEN", sa.getToken()); + assertEquals(1, sa.getVos().size()); + assertEquals("test.vo", sa.getVos().get(0)); + assertEquals("/storage/test.vo", sa.getRootPath()); + assertEquals(StorageClassType.T0D1, sa.getStorageClass()); + assertEquals(1, sa.getAccessPoints().size()); + assertEquals("/test.vo", sa.getAccessPoints().get(0)); + assertEquals(RetentionPolicy.replica, sa.getRetentionPolicy()); + assertEquals(AccessLatency.online, sa.getAccessLatency()); + assertEquals(6, sa.getProtocols().size()); + assertTrue(sa.getProtocols() + .containsAll(Lists.newArrayList("xroot", "https", "http", "root", "gsiftp", "file"))); + 
assertEquals(HttpPerms.NOREAD, sa.getAnonymous()); + assertEquals(0, sa.getAvailableNearlineSpace()); + assertEquals(1, sa.getApproachableRules().size()); + assertEquals("vo:test.vo", sa.getApproachableRules().get(0)); + + ObjectMapper mapper = new ObjectMapper(); + System.out.println(mapper.writeValueAsString(info)); + } +} diff --git a/src/test/java/it/grid/storm/rest/info/namespace/model/NamespaceTest.java b/src/test/java/it/grid/storm/rest/info/namespace/model/NamespaceTest.java new file mode 100644 index 000000000..629099bec --- /dev/null +++ b/src/test/java/it/grid/storm/rest/info/namespace/model/NamespaceTest.java @@ -0,0 +1,29 @@ +package it.grid.storm.rest.info.namespace.model; + +import java.io.File; +import java.io.IOException; + +import org.junit.Assert; +import org.junit.Test; + +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.databind.JsonMappingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.xml.XmlMapper; + +public class NamespaceTest { + + @Test + public void test() throws JsonParseException, JsonMappingException, IOException { + + ClassLoader classLoader = getClass().getClassLoader(); + File file = new File(classLoader.getResource("namespace.xml").getFile()); + XmlMapper xmlMapper = new XmlMapper(); + Namespace info = xmlMapper.readValue(file, Namespace.class); + Assert.assertEquals("TESTVO-FS", info.getFilesystems().get(0).getName()); + ObjectMapper jsonMapper = new ObjectMapper(); + System.out.println(jsonMapper.writeValueAsString(info)); + + } + +} diff --git a/src/test/java/it/grid/storm/rest/metadata/MetadataTests.java b/src/test/java/it/grid/storm/rest/metadata/MetadataTests.java index 966f275df..9669e5534 100644 --- a/src/test/java/it/grid/storm/rest/metadata/MetadataTests.java +++ b/src/test/java/it/grid/storm/rest/metadata/MetadataTests.java @@ -14,13 +14,14 @@ import org.junit.Test; import org.mockito.Mockito; +import 
it.grid.storm.config.Configuration; import it.grid.storm.filesystem.FSException; import it.grid.storm.filesystem.FilesystemError; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.model.MappingRule; import it.grid.storm.namespace.model.StoRIType; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.rest.metadata.model.StoriMetadata; import it.grid.storm.rest.metadata.model.VirtualFsMetadata; import it.grid.storm.rest.metadata.service.ResourceNotFoundException; @@ -35,12 +36,12 @@ public class MetadataTests { private static final String STFN_NOSLASH_PATH = "test.vo/path/to/filename.dat"; private static final String FILE_PATH = "/storage/test.vo/path/to/filename.dat"; - private VirtualFSInterface vfs; + private VirtualFS vfs; private StoriMetadata expected; + + private VirtualFS getVirtualFS(String name, String rootPath) throws NamespaceException { - private VirtualFSInterface getVirtualFS(String name, String rootPath) throws NamespaceException { - - VirtualFSInterface vfs = Mockito.mock(VirtualFSInterface.class); + VirtualFS vfs = Mockito.mock(VirtualFS.class); Mockito.when(vfs.getAliasName()).thenReturn(name); Mockito.when(vfs.getRootPath()).thenReturn(rootPath); StoRI stori = Mockito.mock(StoRI.class); @@ -79,7 +80,10 @@ private Metadata getMetadataServlet(StoriMetadataService s) { } @Before - public void init() throws NamespaceException { + public void init() throws NamespaceException, IOException { + + Configuration.init("src/test/resources/storm.properties"); + vfs = getVirtualFS(VFS_NAME, VFS_ROOTPATH); expected = StoriMetadata.builder() .absolutePath(FILE_PATH) diff --git a/src/test/java/it/grid/storm/rest/metadata/ResourceServiceTest.java b/src/test/java/it/grid/storm/rest/metadata/ResourceServiceTest.java index 989f5786c..ece58ee7c 100644 --- a/src/test/java/it/grid/storm/rest/metadata/ResourceServiceTest.java +++ 
b/src/test/java/it/grid/storm/rest/metadata/ResourceServiceTest.java @@ -19,8 +19,8 @@ import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.model.MappingRule; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.rest.metadata.service.ResourceNotFoundException; import it.grid.storm.rest.metadata.service.ResourceService; @@ -43,9 +43,9 @@ public class ResourceServiceTest { private static final String NOT_FOUND_STFNPATH = "/test.vo2/dir/filename.dat"; - private VirtualFSInterface getVirtualFS(String name, String rootPath) throws NamespaceException { + private VirtualFS getVirtualFS(String name, String rootPath) throws NamespaceException { - VirtualFSInterface vfs = Mockito.mock(VirtualFSInterface.class); + VirtualFS vfs = Mockito.mock(VirtualFS.class); Mockito.when(vfs.getAliasName()).thenReturn(name); Mockito.when(vfs.getRootPath()).thenReturn(rootPath); StoRI fileStori = Mockito.mock(StoRI.class); @@ -61,34 +61,34 @@ private VirtualFSInterface getVirtualFS(String name, String rootPath) throws Nam return vfs; } - private MappingRule getMappingRule(String name, String stfnRoot, VirtualFSInterface vfs) { + private MappingRule getMappingRule(String name, String stfnRoot, VirtualFS vfs) { return new MappingRule(name, stfnRoot, vfs); } private ResourceService getStoRIResourceService() throws NamespaceException { - VirtualFSInterface vfs = getVirtualFS(VFS_NAME, VFS_ROOTPATH); + VirtualFS vfs = getVirtualFS(VFS_NAME, VFS_ROOTPATH); MappingRule rule = getMappingRule(RULE_NAME, RULE_STFNROOT, vfs); return new ResourceService(Lists.newArrayList(vfs), Lists.newArrayList(rule)); } private ResourceService getStoRIResourceServiceStfnRootEndingSlash() throws NamespaceException { - VirtualFSInterface vfs = getVirtualFS(VFS_NAME, VFS_ROOTPATH_ENDING_SLASH); + VirtualFS vfs = getVirtualFS(VFS_NAME, VFS_ROOTPATH_ENDING_SLASH); MappingRule rule = 
getMappingRule(RULE_NAME, RULE_STFNROOT_ENDING_SLASH, vfs); return new ResourceService(Lists.newArrayList(vfs), Lists.newArrayList(rule)); } private ResourceService getStoRIResourceServiceNoRules() throws NamespaceException { - VirtualFSInterface vfs = getVirtualFS(VFS_NAME, VFS_ROOTPATH); + VirtualFS vfs = getVirtualFS(VFS_NAME, VFS_ROOTPATH); return new ResourceService(Lists.newArrayList(vfs), Collections.emptyList()); } private ResourceService getStoRIResourceServiceRulesNULL() throws NamespaceException { - VirtualFSInterface vfs = getVirtualFS(VFS_NAME, VFS_ROOTPATH); + VirtualFS vfs = getVirtualFS(VFS_NAME, VFS_ROOTPATH); return new ResourceService(Lists.newArrayList(vfs), null); } @@ -99,12 +99,12 @@ private ResourceService getStoRIResourceServiceVfsListNULL() throws NamespaceExc private ResourceService getStoRIResourceServiceNoVFSs() throws NamespaceException { - VirtualFSInterface vfs = getVirtualFS(VFS_NAME, VFS_ROOTPATH); + VirtualFS vfs = getVirtualFS(VFS_NAME, VFS_ROOTPATH); MappingRule rule = getMappingRule(RULE_NAME, RULE_STFNROOT, vfs); - return new ResourceService(Collections.emptyList(), + return new ResourceService(Collections.emptyList(), Lists.newArrayList(rule)); } - + @Before public void initLocalTmpDirectory() throws IOException { new File(DIR_PATH).mkdirs(); diff --git a/src/test/java/it/grid/storm/rest/metadata/StoriMetadataServiceTest.java b/src/test/java/it/grid/storm/rest/metadata/StoriMetadataServiceTest.java index 998ccd708..9f981b7f4 100644 --- a/src/test/java/it/grid/storm/rest/metadata/StoriMetadataServiceTest.java +++ b/src/test/java/it/grid/storm/rest/metadata/StoriMetadataServiceTest.java @@ -27,7 +27,7 @@ import it.grid.storm.filesystem.LocalFile; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.VirtualFSInterface; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.rest.metadata.model.StoriMetadata; import 
it.grid.storm.rest.metadata.model.StoriMetadata.ResourceStatus; import it.grid.storm.rest.metadata.model.StoriMetadata.ResourceType; @@ -57,7 +57,7 @@ private void init(boolean dirExists, boolean fileExists, boolean isMigrated, boo initStormEA(isMigrated, isRecalled); - VirtualFSInterface vfs = Mockito.mock(VirtualFSInterface.class); + VirtualFS vfs = Mockito.mock(VirtualFS.class); Mockito.when(vfs.getAliasName()).thenReturn(VFS_NAME); Mockito.when(vfs.getRootPath()).thenReturn(VFS_ROOT_PATH); diff --git a/src/test/java/it/grid/storm/tape/recalltable/resources/TaskResourceTest.java b/src/test/java/it/grid/storm/tape/recalltable/resources/TaskResourceTest.java index 19415cefc..300b829a1 100644 --- a/src/test/java/it/grid/storm/tape/recalltable/resources/TaskResourceTest.java +++ b/src/test/java/it/grid/storm/tape/recalltable/resources/TaskResourceTest.java @@ -1,6 +1,7 @@ package it.grid.storm.tape.recalltable.resources; -import static it.grid.storm.config.Configuration.CONFIG_FILE_PATH; +import static it.grid.storm.persistence.model.TapeRecallTO.RecallTaskType.BOL; +import static it.grid.storm.persistence.model.TapeRecallTO.RecallTaskType.PTG; import static it.grid.storm.tape.recalltable.resources.TaskInsertRequest.MAX_RETRY_ATTEMPTS; import static javax.ws.rs.core.Response.Status.BAD_REQUEST; import static javax.ws.rs.core.Response.Status.CREATED; @@ -14,14 +15,15 @@ import java.io.IOException; import java.net.URI; -import java.util.ArrayList; import java.util.Date; import java.util.List; +import java.util.Random; import java.util.UUID; import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.Response; +import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; @@ -30,12 +32,13 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Lists; +import it.grid.storm.config.Configuration; import it.grid.storm.griduser.VONameMatchingRule; import it.grid.storm.namespace.NamespaceException; import 
it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.model.ApproachableRule; import it.grid.storm.namespace.model.SubjectRules; +import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.persistence.exceptions.DataAccessException; import it.grid.storm.persistence.model.TapeRecallTO; import it.grid.storm.rest.metadata.service.ResourceNotFoundException; @@ -55,33 +58,45 @@ public class TaskResourceTest { private static final String FILE_PATH = "/storage/test.vo/path/to/filename.dat"; private static final String STFN_PATH = "/test.vo/path/to/filename.dat"; - private VirtualFSInterface VFS = getVirtualFS(VFS_NAME, VFS_ROOTPATH, VFS_VONAME); - - private StoRI STORI = getStoRI(VFS); - private UUID groupTaskID = UUID.randomUUID(); - private TapeRecallCatalog RECALL_CATALOG = getTapeRecallCatalogInsertSuccess(groupTaskID); - private TapeRecallCatalog BROKEN_RECALL_CATALOG = getTapeRecallCatalogInsertError(); + private TapeRecallCatalog RECALL_CATALOG; + private TapeRecallCatalog BROKEN_RECALL_CATALOG; - static { - System.setProperty(CONFIG_FILE_PATH, "storm.properties"); + private TapeRecallTO createRandom(Date date, String voName) { + + TapeRecallTO result = new TapeRecallTO(); + Random r = new Random(); + result.setFileName("/root/" + voName + "/test/" + r.nextInt(1001)); + result.setRequestToken(TRequestToken.getRandom()); + if (r.nextInt(2) == 0) { + result.setRequestType(BOL); + } else { + result.setRequestType(PTG); + } + result.setUserID("FakeId"); + result.setRetryAttempt(0); + result.setPinLifetime(r.nextInt(1001)); + result.setVoName(voName); + result.setInsertionInstant(date); + int deferred = r.nextInt(2); + Date deferredRecallTime = new Date(date.getTime() + (deferred * (long) Math.random())); + result.setDeferredRecallInstant(deferredRecallTime); + result.setGroupTaskId(UUID.randomUUID()); + return result; } - private TapeRecallCatalog getTapeRecallCatalogInsertSuccess(UUID groupTaskId) { 
+ private TapeRecallCatalog getTapeRecallCatalogInsertSuccess(UUID groupTaskId) + throws DataAccessException { TapeRecallCatalog catalog = Mockito.mock(TapeRecallCatalog.class); - try { - Mockito.when(catalog.insertNewTask(Mockito.any(TapeRecallTO.class))).thenReturn(groupTaskId); - Mockito.when(catalog.getGroupTasks(groupTaskId)) - .thenReturn(Lists.newArrayList(TapeRecallTO.createRandom(new Date(), VFS_VONAME))); - } catch (DataAccessException e) { - e.printStackTrace(); - } + Mockito.when(catalog.insertNewTask(Mockito.any(TapeRecallTO.class))).thenReturn(groupTaskId); + TapeRecallTO fakeTask = createRandom(new Date(), VFS_VONAME); + Mockito.when(catalog.getGroupTasks(groupTaskId)).thenReturn(Lists.newArrayList(fakeTask)); return catalog; } - private StoRI getStoRI(VirtualFSInterface virtualFS) { + private StoRI getStoRI(VirtualFS virtualFS) { StoRI sto = Mockito.mock(StoRI.class); Mockito.when(sto.getAbsolutePath()).thenReturn(FILE_PATH); Mockito.when(sto.getVirtualFileSystem()).thenReturn(virtualFS); @@ -123,9 +138,9 @@ private TapeRecallCatalog getTapeRecallCatalogInsertError() { return catalog; } - private VirtualFSInterface getVirtualFS(String name, String rootPath, String voName) { + private VirtualFS getVirtualFS(String name, String rootPath, String voName) { - VirtualFSInterface vfs = Mockito.mock(VirtualFSInterface.class); + VirtualFS vfs = Mockito.mock(VirtualFS.class); ApproachableRule appRule = Mockito.mock(ApproachableRule.class); SubjectRules subRules = Mockito.mock(SubjectRules.class); VONameMatchingRule matchingRule = Mockito.mock(VONameMatchingRule.class); @@ -150,11 +165,11 @@ private TaskResource getTaskResource(ResourceService service, TapeRecallCatalog return new TaskResource(service, catalog); } - private void testGETTaskInfo(Response res) + private void testGETTaskInfo(Response res, StoRI stori) throws InvalidTRequestTokenAttributesException, DataAccessException, JsonParseException, JsonMappingException, IOException, NamespaceException, 
ResourceNotFoundException { - TaskResource recallEndpoint = getTaskResource(getResourceService(STORI), RECALL_CATALOG); + TaskResource recallEndpoint = getTaskResource(getResourceService(stori), RECALL_CATALOG); // extract response data URI location = URI.create(res.getHeaderString("Location")); @@ -163,7 +178,7 @@ private void testGETTaskInfo(Response res) String requestTokenValue = location.getQuery().split("=")[1]; // prepare mocks for task info request - TapeRecallTO task = TapeRecallTO.createRandom(new Date(), VFS_VONAME); + TapeRecallTO task = createRandom(new Date(), VFS_VONAME); TRequestToken requestToken = Mockito.mock(TRequestToken.class); Mockito.when(requestToken.getValue()).thenReturn(requestTokenValue); task.setRequestToken(new TRequestToken(requestTokenValue, new Date())); @@ -178,11 +193,23 @@ private void testGETTaskInfo(Response res) assertNotNull(t); } + @Before + public void init() throws DataAccessException, IOException { + + Configuration.init("src/test/resources/storm.properties"); + + RECALL_CATALOG = getTapeRecallCatalogInsertSuccess(groupTaskID); + BROKEN_RECALL_CATALOG = getTapeRecallCatalogInsertError(); + + } + @Test public void testPOSTSuccess() throws DataAccessException, NamespaceException, JsonParseException, JsonMappingException, IOException, InvalidTRequestTokenAttributesException, ResourceNotFoundException { + VirtualFS VFS = getVirtualFS(VFS_NAME, VFS_ROOTPATH, VFS_VONAME); + StoRI STORI = getStoRI(VFS); TaskResource recallEndpoint = getTaskResource(getResourceService(STORI), RECALL_CATALOG); TaskInsertRequest request = TaskInsertRequest.builder() .stfn(STFN_PATH) @@ -195,7 +222,7 @@ public void testPOSTSuccess() assertNotNull(res.getHeaderString("Location")); assertEquals(res.getStatus(), CREATED.getStatusCode()); - testGETTaskInfo(res); + testGETTaskInfo(res, STORI); } @Test @@ -203,6 +230,8 @@ public void testPOSTSuccessWithNullVoName() throws DataAccessException, NamespaceException, JsonParseException, JsonMappingException, 
IOException, InvalidTRequestTokenAttributesException, ResourceNotFoundException { + VirtualFS VFS = getVirtualFS(VFS_NAME, VFS_ROOTPATH, VFS_VONAME); + StoRI STORI = getStoRI(VFS); TaskResource recallEndpoint = getTaskResource(getResourceService(STORI), RECALL_CATALOG); TaskInsertRequest request = TaskInsertRequest.builder() .stfn(STFN_PATH) @@ -215,7 +244,7 @@ public void testPOSTSuccessWithNullVoName() assertNotNull(res.getHeaderString("Location")); assertEquals(res.getStatus(), CREATED.getStatusCode()); - testGETTaskInfo(res); + testGETTaskInfo(res, STORI); } @Test @@ -244,6 +273,8 @@ public void testPOSTNamespaceErrorOnResolvingStfnPath() public void testPOSTBadVoNameRequested() throws DataAccessException, NamespaceException, ResourceNotFoundException { + VirtualFS VFS = getVirtualFS(VFS_NAME, VFS_ROOTPATH, VFS_VONAME); + StoRI STORI = getStoRI(VFS); TaskResource recallEndpoint = new TaskResource(getResourceService(STORI), RECALL_CATALOG); TaskInsertRequest request = TaskInsertRequest.builder() .stfn(STFN_PATH) @@ -280,6 +311,8 @@ public void testPOSTUnableToMapStfnPath() public void testPOSTDbException() throws DataAccessException, NamespaceException, ResourceNotFoundException { + VirtualFS VFS = getVirtualFS(VFS_NAME, VFS_ROOTPATH, VFS_VONAME); + StoRI STORI = getStoRI(VFS); TaskResource recallEndpoint = new TaskResource(getResourceService(STORI), BROKEN_RECALL_CATALOG); TaskInsertRequest request = TaskInsertRequest.builder() @@ -301,6 +334,8 @@ public void testPOSTDbException() public void testPOSTValidationRequestNullFilePath() throws DataAccessException, NamespaceException, ResourceNotFoundException { + VirtualFS VFS = getVirtualFS(VFS_NAME, VFS_ROOTPATH, VFS_VONAME); + StoRI STORI = getStoRI(VFS); TaskResource recallEndpoint = new TaskResource(getResourceService(STORI), BROKEN_RECALL_CATALOG); TaskInsertRequest request = TaskInsertRequest.builder().userId("test").build(); @@ -317,6 +352,8 @@ public void testPOSTValidationRequestNullFilePath() public void 
testPOSTValidationRequestNullUserId() throws DataAccessException, NamespaceException, ResourceNotFoundException { + VirtualFS VFS = getVirtualFS(VFS_NAME, VFS_ROOTPATH, VFS_VONAME); + StoRI STORI = getStoRI(VFS); TaskResource recallEndpoint = new TaskResource(getResourceService(STORI), BROKEN_RECALL_CATALOG); TaskInsertRequest request = TaskInsertRequest.builder().stfn(STFN_PATH).build(); @@ -333,6 +370,8 @@ public void testPOSTValidationRequestNullUserId() public void testPOSTValidationRequestInvalidNegativeRetryAttempts() throws DataAccessException, NamespaceException, ResourceNotFoundException { + VirtualFS VFS = getVirtualFS(VFS_NAME, VFS_ROOTPATH, VFS_VONAME); + StoRI STORI = getStoRI(VFS); TaskResource recallEndpoint = new TaskResource(getResourceService(STORI), BROKEN_RECALL_CATALOG); TaskInsertRequest request = @@ -351,6 +390,8 @@ public void testPOSTValidationRequestInvalidNegativeRetryAttempts() public void testPOSTValidationRequestInvalidTooManyRetryAttempts() throws DataAccessException, NamespaceException, ResourceNotFoundException { + VirtualFS VFS = getVirtualFS(VFS_NAME, VFS_ROOTPATH, VFS_VONAME); + StoRI STORI = getStoRI(VFS); TaskResource recallEndpoint = new TaskResource(getResourceService(STORI), BROKEN_RECALL_CATALOG); TaskInsertRequest request = TaskInsertRequest.builder() @@ -370,7 +411,7 @@ public void testPOSTValidationRequestInvalidTooManyRetryAttempts() private TapeRecallCatalog getTapeRecallCatalogInProgressNotEmpty() { - List emptyList = new ArrayList(); + List emptyList = Lists.newArrayList(); TapeRecallCatalog catalog = Mockito.mock(TapeRecallCatalog.class); Mockito.when(catalog.getAllInProgressTasks(Mockito.anyInt())).thenReturn(emptyList); return catalog; @@ -381,6 +422,8 @@ public void testGETTasksInProgressEmpty() throws DataAccessException, NamespaceException, JsonParseException, JsonMappingException, IOException, InvalidTRequestTokenAttributesException, ResourceNotFoundException { + VirtualFS VFS = getVirtualFS(VFS_NAME, 
VFS_ROOTPATH, VFS_VONAME); + StoRI STORI = getStoRI(VFS); TaskResource recallEndpoint = getTaskResource(getResourceService(STORI), getTapeRecallCatalogInProgressNotEmpty()); Response res = recallEndpoint.getTasks(10); diff --git a/src/test/resources/empty.properties b/src/test/resources/empty.properties new file mode 100644 index 000000000..531c1c59c --- /dev/null +++ b/src/test/resources/empty.properties @@ -0,0 +1 @@ +version: 2.0 \ No newline at end of file diff --git a/src/test/resources/namespace-1.5.0.xsd b/src/test/resources/namespace-1.5.0.xsd new file mode 100644 index 000000000..142bb247f --- /dev/null +++ b/src/test/resources/namespace-1.5.0.xsd @@ -0,0 +1,344 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/test/resources/namespace.xml b/src/test/resources/namespace.xml new file mode 100644 index 000000000..fdd584d03 --- /dev/null +++ b/src/test/resources/namespace.xml @@ -0,0 +1,663 @@ + + + + TESTVO_TOKEN + T0D1 + /storage/test.vo + it.grid.storm.filesystem.swig.posixfs + it.grid.storm.filesystem.MockSpaceSystem + + permit-all + + + replica + online + neverExpire + 4000000000 + 0 + + + AoT + + + file + + + 0 + gsiftp + gridftp01.cnaf.infn.it + 2811 + + + 1 + gsiftp + gridftp02.cnaf.infn.it + 2811 + + + root + xroot.cnaf.infn.it + 1094 + + + 2 + xroot + xroot.cnaf.infn.it + 1094 + + + 3 + https + 
dav01.cnaf.infn.it + 8443 + + + 4 + http + dav01.cnaf.infn.it + 8085 + + + 5 + https + dav02.cnaf.infn.it + 8443 + + + 6 + http + dav02.cnaf.infn.it + 8085 + + + + round-robin + + + + + + + + + + + TESTVO2_TOKEN + T0D1 + /storage/test.vo.2 + it.grid.storm.filesystem.swig.posixfs + it.grid.storm.filesystem.MockSpaceSystem + + permit-all + + + replica + online + neverExpire + 4000000000 + 0 + + + AoT + + + file + + + 0 + gsiftp + gridftp01.cnaf.infn.it + 2811 + + + 1 + gsiftp + gridftp02.cnaf.infn.it + 2811 + + + root + xroot.cnaf.infn.it + 1094 + + + 2 + xroot + xroot.cnaf.infn.it + 1094 + + + 3 + https + dav01.cnaf.infn.it + 8443 + + + 4 + http + dav01.cnaf.infn.it + 8085 + + + 5 + https + dav02.cnaf.infn.it + 8443 + + + 6 + http + dav02.cnaf.infn.it + 8085 + + + + round-robin + + + + + + + + + + + IGI_TOKEN + T0D1 + /storage/igi + it.grid.storm.filesystem.swig.posixfs + it.grid.storm.filesystem.MockSpaceSystem + + permit-all + + + replica + online + neverExpire + 4000000000 + 0 + + + AoT + + + file + + + 0 + gsiftp + gridftp01.cnaf.infn.it + 2811 + + + 1 + gsiftp + gridftp02.cnaf.infn.it + 2811 + + + root + xroot.cnaf.infn.it + 1094 + + + 2 + xroot + xroot.cnaf.infn.it + 1094 + + + 3 + https + dav01.cnaf.infn.it + 8443 + + + 4 + http + dav01.cnaf.infn.it + 8085 + + + 5 + https + dav02.cnaf.infn.it + 8443 + + + 6 + http + dav02.cnaf.infn.it + 8085 + + + + round-robin + + + + + + + + + + + NOAUTH_TOKEN + T0D1 + /storage/noauth + it.grid.storm.filesystem.swig.posixfs + it.grid.storm.filesystem.MockSpaceSystem + + permit-all + + + replica + online + neverExpire + 4000000000 + 0 + + + AoT + + + file + + + 0 + gsiftp + gridftp01.cnaf.infn.it + 2811 + + + 1 + gsiftp + gridftp02.cnaf.infn.it + 2811 + + + root + xroot.cnaf.infn.it + 1094 + + + 2 + xroot + xroot.cnaf.infn.it + 1094 + + + 3 + https + dav01.cnaf.infn.it + 8443 + + + 4 + http + dav01.cnaf.infn.it + 8085 + + + 5 + https + dav02.cnaf.infn.it + 8443 + + + 6 + http + dav02.cnaf.infn.it + 8085 + + + + round-robin + 
+ + + + + + + + + + TESTVOBIS_TOKEN + T0D1 + /storage/test.vo.bis + it.grid.storm.filesystem.swig.posixfs + it.grid.storm.filesystem.MockSpaceSystem + + permit-all + + + replica + online + neverExpire + 4000000000 + 0 + + + AoT + + + file + + + 0 + gsiftp + gridftp01.cnaf.infn.it + 2811 + + + 1 + gsiftp + gridftp02.cnaf.infn.it + 2811 + + + root + xroot.cnaf.infn.it + 1094 + + + 2 + xroot + xroot.cnaf.infn.it + 1094 + + + 3 + https + dav01.cnaf.infn.it + 8443 + + + 4 + http + dav01.cnaf.infn.it + 8085 + + + 5 + https + dav02.cnaf.infn.it + 8443 + + + 6 + http + dav02.cnaf.infn.it + 8085 + + + + round-robin + + + + + + + + + + + NESTED_TOKEN + T0D1 + /storage/nested + it.grid.storm.filesystem.swig.posixfs + it.grid.storm.filesystem.MockSpaceSystem + + permit-all + + + replica + online + neverExpire + 4000000000 + 0 + + + AoT + + + file + + + 0 + gsiftp + gridftp01.cnaf.infn.it + 2811 + + + 1 + gsiftp + gridftp02.cnaf.infn.it + 2811 + + + root + xroot.cnaf.infn.it + 1094 + + + 2 + xroot + xroot.cnaf.infn.it + 1094 + + + 3 + https + dav01.cnaf.infn.it + 8443 + + + 4 + http + dav01.cnaf.infn.it + 8085 + + + 5 + https + dav02.cnaf.infn.it + 8443 + + + 6 + http + dav02.cnaf.infn.it + 8085 + + + + round-robin + + + + + + + + + + + TAPE_TOKEN + T1D0 + /storage/tape + it.grid.storm.filesystem.swig.test + it.grid.storm.filesystem.MockSpaceSystem + + permit-all + + + custodial + nearline + neverExpire + 4000000000 + 8000000000 + + + AoT + + + file + + + 0 + gsiftp + gridftp01.cnaf.infn.it + 2811 + + + 1 + gsiftp + gridftp02.cnaf.infn.it + 2811 + + + root + xroot.cnaf.infn.it + 1094 + + + 2 + xroot + xroot.cnaf.infn.it + 1094 + + + 3 + https + dav01.cnaf.infn.it + 8443 + + + 4 + http + dav01.cnaf.infn.it + 8085 + + + 5 + https + dav02.cnaf.infn.it + 8443 + + + 6 + http + dav02.cnaf.infn.it + 8085 + + + + round-robin + + + + + + + + + + + + + /test.vo + TESTVO-FS + + + /test.vo.2 + TESTVO2-FS + + + /igi + IGI-FS + + + /noauth + NOAUTH-FS + + + /test.vo.bis + TESTVOBIS-FS + + + 
/test.vo.2/nested + NESTED-FS + + + /alias + NESTED-FS + + + /tape + TAPE-FS + + + + + + * + test.vo + + TESTVO-FS + false + + + + * + test.vo.2 + + TESTVO2-FS + false + + + + * + test.vo + + IGI-FS + false + + + + * + test.vo + + NOAUTH-FS + false + + + + * + test.vo.2 + + TESTVOBIS-FS + false + + + + * + test.vo.2 + + NESTED-FS + false + + + + * + test.vo.2 + + TAPE-FS + false + + + \ No newline at end of file diff --git a/src/test/resources/storm.properties b/src/test/resources/storm.properties index a15676b2e..b1c07ec4f 100644 --- a/src/test/resources/storm.properties +++ b/src/test/resources/storm.properties @@ -1,63 +1,108 @@ -storm.service.FE-public.hostname = storm.example -storm.service.port = 8444 -storm.service.SURL.endpoint = srm://storm.example:8444/srm/managerv2 -storm.service.SURL.default-ports = 8444 -storm.service.request-db.host = storm.example -storm.service.request-db.username = storm -storm.service.request-db.passwd = password -directory.automatic-creation = false -directory.writeperm = false -pinLifetime.default=259200 -pinLifetime.maximum=1814400 -extraslashes.file= -extraslashes.rfio= -extraslashes.gsiftp=/ -extraslashes.root= -fileLifetime.default=259200 -default.overwrite = A -default.storagetype = P -persistence.internal-db.connection-pool = true -persistence.internal-db.connection-pool.maxActive = 50 -persistence.internal-db.connection-pool.maxWait = 50 -scheduler.serial=false -scheduler.crusher.workerCorePoolSize=10 -scheduler.crusher.workerMaxPoolSize=50 -scheduler.crusher.queueSize=2000 -scheduler.chunksched.ptp.workerCorePoolSize=50 -scheduler.chunksched.ptp.workerMaxPoolSize=200 -scheduler.chunksched.ptp.queueSize=1000 -scheduler.chunksched.ptg.workerCorePoolSize=50 -scheduler.chunksched.ptg.workerMaxPoolSize=200 -scheduler.chunksched.ptg.queueSize=2000 -scheduler.chunksched.bol.workerCorePoolSize=50 -scheduler.chunksched.bol.workerMaxPoolSize=200 -scheduler.chunksched.bol.queueSize=2000 
-scheduler.chunksched.copy.workerCorePoolSize=10 -scheduler.chunksched.copy.workerMaxPoolSize=50 -scheduler.chunksched.copy.queueSize=500 -asynch.db.ReconnectPeriod=18000 -asynch.db.DelayPeriod=30 -asynch.PickingInitialDelay=1 -asynch.PickingTimeInterval=2 -asynch.PickingMaxBatchSize=100 -synchcall.directoryManager.maxLsEntry=2000 -storm.rest.services.port=9998 -storm.rest.services.maxthreads=100 -storm.rest.services.max_queue_size=1000 -synchcall.xmlrpc.unsecureServerPort=8080 -synchcall.xmlrpc.maxthread=256 -synchcall.xmlrpc.max_queue_size=1000 -synchcall.xmlrpc.security.enabled=true -synchcall.xmlrpc.security.token= -gc.pinnedfiles.cleaning.delay = 10 -gc.pinnedfiles.cleaning.interval = 300 -tape.recalltable.service.param.retry-value=retry-value -tape.recalltable.service.param.status=status -tape.recalltable.service.param.takeover=first -purging=true -purge.interval=600 -purge.size=800 -expired.request.time=21600 -transit.interval = 300 -transit.delay = 10 -ptg.skip-acl-setup = false \ No newline at end of file +version: 2.0 + +srm_endpoints[0].host: storm-fe01.example +srm_endpoints[0].port: 8444 +srm_endpoints[1].host: storm-fe02.example +srm_endpoints[1].port: 8445 + +db.hostname: storm-db.example +db.username: test +db.password: secret +db.port: 3308 +db.properties: test +db.pool.size: 200 +db.pool.min_idle: 50 +db.pool.max_wait_millis: 1200 +db.pool.test_on_borrow: false +db.pool.test_while_idle: false + +rest.port: 9999 +rest.max_threads: 150 +rest.max_queue_size: 1500 + +xmlrpc.port: 9090 +xmlrpc.max_threads: 512 +xmlrpc.max_queue_size: 2000 + +security.enabled: true +security.token: ilovejava + +du.enabled: true +du.parallel_tasks_enabled: true +du.initial_delay: 120 +du.tasks_interval: 200000 + +inprogress_requests_agent.delay: 20 +inprogress_requests_agent.interval: 400 +inprogress_requests_agent.ptp_expiration_time: 333000 + +expired_spaces_agent.delay: 20 +expired_spaces_agent.interval: 400 + +completed_requests_agent.enabled: true 
+completed_requests_agent.delay: 20 +completed_requests_agent.interval: 400 +completed_requests_agent.purge_age: 22200 +completed_requests_agent.purge_size: 1800 + +requests_picker_agent.delay: 10 +requests_picker_agent.interval: 20 +requests_picker_agent.max_fetched_size: 1000 + +requests_scheduler.core_pool_size: 10 +requests_scheduler.max_pool_size: 50 +requests_scheduler.queue_size: 2000 + +ptp_scheduler.core_pool_size: 50 +ptp_scheduler.max_pool_size: 200 +ptp_scheduler.queue_size: 1000 + +ptg_scheduler.core_pool_size: 50 +ptg_scheduler.max_pool_size: 200 +ptg_scheduler.queue_size: 2000 + +bol_scheduler.core_pool_size: 50 +bol_scheduler.max_pool_size: 200 +bol_scheduler.queue_size: 2000 + +sanity_checks_enabled: false + +extraslashes.file: /file +extraslashes.rfio: /rfio +extraslashes.gsiftp: /gsiftp +extraslashes.root: /root + +synch_ls.max_entries: 3000 +synch_ls.default_all_level_recursive: true +synch_ls.default_num_levels: 2 +synch_ls.default_offset: 1 + +pinlifetime.default: 300000 +pinlifetime.maximum: 18000000 + +skip_ptg_acl_setup: true + +files.default_size: 100000 +files.default_lifetime: 300000 +files.default_overwrite: N +files.default_storagetype: P + +directories.enable_automatic_creation: true +directories.enable_writeperm_on_creation: true + +hearthbeat.bookkeeping_enabled: true +hearthbeat.performance_measuring_enabled: true +hearthbeat.period: 30 +hearthbeat.performance_logbook_time_interval: 10 +hearthbeat.performance_glance_time_interval: 10 + +info_quota_refresh_period: 900 +http_turl_prefix: / +server_pool_status_check_timeout: 20000 +abort_maxloop: 10 +ping_properties_filename: ping-values.properties + +#unknown.property: test + +site.name: StoRM test +site.quality_level: PRODUCTION diff --git a/src/test/resources/v1.properties b/src/test/resources/v1.properties new file mode 100644 index 000000000..b5067a9d7 --- /dev/null +++ b/src/test/resources/v1.properties @@ -0,0 +1,98 @@ +storm.service.FE-public.hostname = fe.example.org 
+storm.service.port = 8444 +storm.service.SURL.endpoint = srm://fe-01.example.org:8444/srm/managerv2,srm://fe-02.example.org:8444/srm/managerv2,srm://fe.example.org:8444/srm/managerv2 +storm.service.SURL.default-ports = 8444 + +storm.service.request-db.host = be.example.org +storm.service.request-db.username = storm +storm.service.request-db.passwd = my-secret-password +storm.service.request-db.properties = prop=1 + +storm.rest.services.port = 9999 +storm.rest.services.maxthread = 512 +storm.rest.services.max_queue_size = 2000 + +synchcall.xmlrpc.unsecureServerPort = 8081 +synchcall.xmlrpc.maxthread = 512 +synchcall.xmlrpc.max_queue_size = 2000 + +synchcall.xmlrpc.security.enabled = true +synchcall.xmlrpc.security.token = ilovejava + +storm.service.du.enabled=true +storm.service.du.delaySecs=60 +storm.service.du.periodSecs=360 +storm.service.du.parallelTasks=true + +sanity-check.enabled=true + +directory.automatic-creation = true +directory.writeperm = true + +pinLifetime.default = 310000 +pinLifetime.maximum = 1900000 + +extraslashes.file = /file +extraslashes.rfio = /rfio +extraslashes.gsiftp = /gsiftp +extraslashes.root = /root + +fileLifetime.default = 300000 +fileSize.default = 2000000 + +default.overwrite = N +default.storagetype = P + +persistence.internal-db.connection-pool = true +persistence.internal-db.connection-pool.maxActive = 10 +persistence.internal-db.connection-pool.maxWait = 50 + +scheduler.crusher.workerCorePoolSize=20 +scheduler.crusher.workerMaxPoolSize=60 +scheduler.crusher.queueSize=3000 +scheduler.chunksched.ptp.workerCorePoolSize=60 +scheduler.chunksched.ptp.workerMaxPoolSize=300 +scheduler.chunksched.ptp.queueSize=2000 +scheduler.chunksched.ptg.workerCorePoolSize=70 +scheduler.chunksched.ptg.workerMaxPoolSize=400 +scheduler.chunksched.ptg.queueSize=3000 +scheduler.chunksched.bol.workerCorePoolSize=40 +scheduler.chunksched.bol.workerMaxPoolSize=100 +scheduler.chunksched.bol.queueSize=1000 + +asynch.PickingInitialDelay=15 
+asynch.PickingTimeInterval=25 +asynch.PickingMaxBatchSize=150 + +synchcall.directoryManager.maxLsEntry = 3000 +synchcall.directoryManager.default.AllLevelRecursive = true +synchcall.directoryManager.default.Levels = 3 +synchcall.directoryManager.default.Offset = 2 + +gc.pinnedfiles.cleaning.delay = 10 +gc.pinnedfiles.cleaning.interval = 300 + +ptg.skip-acl-setup=false + +purging = false +purge.delay = 100 +purge.interval = 600 +purge.size = 1000 +expired.request.time = 7200 + +transit.delay = 60 +transit.interval = 600 +expired.inprogress.time = 7000 + +health.electrocardiogram.period = 30 +health.performance.glance.timeInterval = 10 +health.performance.logbook.timeInterval = 10 +health.performance.mesauring.enabled = true +health.bookkeeping.enabled = true + +ping-properties.filename = ping-values.properties +abort.maxloop = 10 +info.quota.refresh.period = 900 +server-pool.status-check.timeout = 20000 +http.turl_prefix = / +